author     Hauke Mehrtens <hauke@hauke-m.de>   2017-10-29 19:32:10 +0100
committer  Hauke Mehrtens <hauke@hauke-m.de>   2017-12-16 22:11:19 +0100
commit     b3f95490b9bec020314eb32016988ee262d52884 (patch)
tree       f8289c19587108bb3b687575776f559eee6f01fd /target/linux/generic/pending-4.14
parent     a362df6f253e4460504fd870d6a7af40ef86ad70 (diff)
kernel: generic: Add kernel 4.14 support
This adds initial support for kernel 4.14 based on the patches for
kernel 4.9.

In the configuration I deactivated some of the new possible security
features like:
CONFIG_REFCOUNT_FULL
CONFIG_SLAB_FREELIST_HARDENED
CONFIG_SOFTLOCKUP_DETECTOR
CONFIG_WARN_ALL_UNSEEDED_RANDOM

And these overlay FS options are also deactivated:
CONFIG_OVERLAY_FS_INDEX
CONFIG_OVERLAY_FS_REDIRECT_DIR

I activated these:
CONFIG_FORTIFY_SOURCE
CONFIG_POSIX_TIMERS
CONFIG_SLAB_MERGE_DEFAULT
CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED

I am not sure if I did the porting correctly for the following patches:
target/linux/generic/backport-4.14/020-backport_netfilter_rtcache.patch
target/linux/generic/hack-4.14/220-gc_sections.patch
target/linux/generic/hack-4.14/321-powerpc_crtsavres_prereq.patch
target/linux/generic/pending-4.14/305-mips_module_reloc.patch
target/linux/generic/pending-4.14/611-netfilter_match_bypass_default_table.patch
target/linux/generic/pending-4.14/680-NET-skip-GRO-for-foreign-MAC-addresses.patch

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
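For reference, the options called out above take the following form in a
kernel .config fragment (illustrative only; the generic config file itself,
presumably target/linux/generic/config-4.14, is outside the path filter of
this diff):

# CONFIG_REFCOUNT_FULL is not set
# CONFIG_SLAB_FREELIST_HARDENED is not set
# CONFIG_SOFTLOCKUP_DETECTOR is not set
# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
# CONFIG_OVERLAY_FS_INDEX is not set
# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set
CONFIG_FORTIFY_SOURCE=y
CONFIG_POSIX_TIMERS=y
CONFIG_SLAB_MERGE_DEFAULT=y
CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y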
Diffstat (limited to 'target/linux/generic/pending-4.14')
-rw-r--r-- target/linux/generic/pending-4.14/100-MIPS-fix-cache-flushing-for-highmem-pages.patch | 30
-rw-r--r-- target/linux/generic/pending-4.14/110-ehci_hcd_ignore_oc.patch | 79
-rw-r--r-- target/linux/generic/pending-4.14/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch | 82
-rw-r--r-- target/linux/generic/pending-4.14/130-add-linux-spidev-compatible-si3210.patch | 18
-rw-r--r-- target/linux/generic/pending-4.14/131-spi-use-gpio_set_value_cansleep-for-setting-chipsele.patch | 20
-rw-r--r-- target/linux/generic/pending-4.14/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch | 62
-rw-r--r-- target/linux/generic/pending-4.14/141-jffs2-add-RENAME_EXCHANGE-support.patch | 73
-rw-r--r-- target/linux/generic/pending-4.14/150-bridge_allow_receiption_on_disabled_port.patch | 43
-rw-r--r-- target/linux/generic/pending-4.14/160-mtd-part-add-generic-parsing-of-linux-part-probe.patch | 161
-rw-r--r-- target/linux/generic/pending-4.14/180-net-phy-at803x-add-support-for-AT8032.patch | 70
-rw-r--r-- target/linux/generic/pending-4.14/190-2-5-e1000e-Fix-wrong-comment-related-to-link-detection.patch | 43
-rw-r--r-- target/linux/generic/pending-4.14/201-extra_optimization.patch | 32
-rw-r--r-- target/linux/generic/pending-4.14/203-kallsyms_uncompressed.patch | 119
-rw-r--r-- target/linux/generic/pending-4.14/205-backtrace_module_info.patch | 45
-rw-r--r-- target/linux/generic/pending-4.14/206-mips-disable-vdso.patch | 23
-rw-r--r-- target/linux/generic/pending-4.14/240-remove-unsane-filenames-from-deps_initramfs-list.patch | 46
-rw-r--r-- target/linux/generic/pending-4.14/261-enable_wilink_platform_without_drivers.patch | 20
-rw-r--r-- target/linux/generic/pending-4.14/270-uapi-kernel.h-glibc-specific-inclusion-of-sysinfo.h.patch | 32
-rw-r--r-- target/linux/generic/pending-4.14/271-uapi-libc-compat.h-do-not-rely-on-__GLIBC__.patch | 105
-rw-r--r-- target/linux/generic/pending-4.14/272-uapi-if_ether.h-prevent-redefinition-of-struct-ethhd.patch | 65
-rw-r--r-- target/linux/generic/pending-4.14/300-mips_expose_boot_raw.patch | 40
-rw-r--r-- target/linux/generic/pending-4.14/302-mips_no_branch_likely.patch | 22
-rw-r--r-- target/linux/generic/pending-4.14/304-mips_disable_fpu.patch | 137
-rw-r--r-- target/linux/generic/pending-4.14/305-mips_module_reloc.patch | 366
-rw-r--r-- target/linux/generic/pending-4.14/306-mips_mem_functions_performance.patch | 106
-rw-r--r-- target/linux/generic/pending-4.14/307-mips_highmem_offset.patch | 19
-rw-r--r-- target/linux/generic/pending-4.14/308-mips32r2_tune.patch | 22
-rw-r--r-- target/linux/generic/pending-4.14/310-arm_module_unresolved_weak_sym.patch | 22
-rw-r--r-- target/linux/generic/pending-4.14/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch | 272
-rw-r--r-- target/linux/generic/pending-4.14/332-arc-add-OWRTDTB-section.patch | 80
-rw-r--r-- target/linux/generic/pending-4.14/333-arc-enable-unaligned-access-in-kernel-mode.patch | 24
-rw-r--r-- target/linux/generic/pending-4.14/340-MIPS-mm-remove-mips_dma_mapping_error.patch | 32
-rw-r--r-- target/linux/generic/pending-4.14/341-MIPS-mm-remove-no-op-dma_map_ops-where-possible.patch | 140
-rw-r--r-- target/linux/generic/pending-4.14/400-mtd-add-rootfs-split-support.patch | 123
-rw-r--r-- target/linux/generic/pending-4.14/401-mtd-add-support-for-different-partition-parser-types.patch | 110
-rw-r--r-- target/linux/generic/pending-4.14/402-mtd-use-typed-mtd-parsers-for-rootfs-and-firmware-split.patch | 81
-rw-r--r-- target/linux/generic/pending-4.14/403-mtd-hook-mtdsplit-to-Kbuild.patch | 32
-rw-r--r-- target/linux/generic/pending-4.14/404-mtd-add-more-helper-functions.patch | 94
-rw-r--r-- target/linux/generic/pending-4.14/411-mtd-partial_eraseblock_write.patch | 154
-rw-r--r-- target/linux/generic/pending-4.14/412-mtd-partial_eraseblock_unlock.patch | 40
-rw-r--r-- target/linux/generic/pending-4.14/420-mtd-redboot_space.patch | 41
-rw-r--r-- target/linux/generic/pending-4.14/430-mtd-add-myloader-partition-parser.patch | 47
-rw-r--r-- target/linux/generic/pending-4.14/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch | 67
-rw-r--r-- target/linux/generic/pending-4.14/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch | 37
-rw-r--r-- target/linux/generic/pending-4.14/440-block2mtd_init.patch | 116
-rw-r--r-- target/linux/generic/pending-4.14/441-block2mtd_probe.patch | 47
-rw-r--r-- target/linux/generic/pending-4.14/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch | 25
-rw-r--r-- target/linux/generic/pending-4.14/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch | 17
-rw-r--r-- target/linux/generic/pending-4.14/465-m25p80-mx-disable-software-protection.patch | 18
-rw-r--r-- target/linux/generic/pending-4.14/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch | 56
-rw-r--r-- target/linux/generic/pending-4.14/476-mtd-spi-nor-add-eon-en25q128.patch | 18
-rw-r--r-- target/linux/generic/pending-4.14/477-mtd-add-spi-nor-add-mx25u3235f.patch | 18
-rw-r--r-- target/linux/generic/pending-4.14/480-mtd-set-rootfs-to-be-root-dev.patch | 38
-rw-r--r-- target/linux/generic/pending-4.14/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch | 73
-rw-r--r-- target/linux/generic/pending-4.14/491-ubi-auto-create-ubiblock-device-for-rootfs.patch | 66
-rw-r--r-- target/linux/generic/pending-4.14/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch | 51
-rw-r--r-- target/linux/generic/pending-4.14/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch | 34
-rw-r--r-- target/linux/generic/pending-4.14/494-mtd-ubi-add-EOF-marker-support.patch | 60
-rw-r--r-- target/linux/generic/pending-4.14/530-jffs2_make_lzma_available.patch | 5180
-rw-r--r-- target/linux/generic/pending-4.14/532-jffs2_eofdetect.patch | 65
-rw-r--r-- target/linux/generic/pending-4.14/551-ubifs-fix-default-compression-selection.patch | 37
-rw-r--r-- target/linux/generic/pending-4.14/600-netfilter_conntrack_flush.patch | 95
-rw-r--r-- target/linux/generic/pending-4.14/610-netfilter_match_bypass_default_checks.patch | 110
-rw-r--r-- target/linux/generic/pending-4.14/611-netfilter_match_bypass_default_table.patch | 111
-rw-r--r-- target/linux/generic/pending-4.14/612-netfilter_match_reduce_memory_access.patch | 22
-rw-r--r-- target/linux/generic/pending-4.14/613-netfilter_optional_tcp_window_check.patch | 44
-rw-r--r-- target/linux/generic/pending-4.14/616-net_optimize_xfrm_calls.patch | 20
-rw-r--r-- target/linux/generic/pending-4.14/630-packet_socket_type.patch | 138
-rw-r--r-- target/linux/generic/pending-4.14/650-pppoe_header_pad.patch | 29
-rw-r--r-- target/linux/generic/pending-4.14/655-increase_skb_pad.patch | 20
-rw-r--r-- target/linux/generic/pending-4.14/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch | 500
-rw-r--r-- target/linux/generic/pending-4.14/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch | 247
-rw-r--r-- target/linux/generic/pending-4.14/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch | 50
-rw-r--r-- target/linux/generic/pending-4.14/680-NET-skip-GRO-for-foreign-MAC-addresses.patch | 145
-rw-r--r-- target/linux/generic/pending-4.14/681-NET-add-of_get_mac_address_mtd.patch | 127
-rw-r--r-- target/linux/generic/pending-4.14/701-phy_extension.patch | 95
-rw-r--r-- target/linux/generic/pending-4.14/703-phy-add-detach-callback-to-struct-phy_driver.patch | 38
-rw-r--r-- target/linux/generic/pending-4.14/734-net-phy-at803x-allow-to-configure-via-pdata.patch | 142
-rw-r--r-- target/linux/generic/pending-4.14/735-net-phy-at803x-fix-at8033-sgmii-mode.patch | 54
-rw-r--r-- target/linux/generic/pending-4.14/810-pci_disable_common_quirks.patch | 60
-rw-r--r-- target/linux/generic/pending-4.14/811-pci_disable_usb_common_quirks.patch | 113
-rw-r--r-- target/linux/generic/pending-4.14/821-usb-Remove-annoying-warning-about-bogus-URB.patch | 76
-rw-r--r-- target/linux/generic/pending-4.14/831-ledtrig_netdev.patch | 74
-rw-r--r-- target/linux/generic/pending-4.14/834-ledtrig-libata.patch | 149
-rw-r--r-- target/linux/generic/pending-4.14/920-mangle_bootargs.patch | 71
85 files changed, 11725 insertions, 0 deletions
diff --git a/target/linux/generic/pending-4.14/100-MIPS-fix-cache-flushing-for-highmem-pages.patch b/target/linux/generic/pending-4.14/100-MIPS-fix-cache-flushing-for-highmem-pages.patch
new file mode 100644
index 0000000..b1c65f7
--- /dev/null
+++ b/target/linux/generic/pending-4.14/100-MIPS-fix-cache-flushing-for-highmem-pages.patch
@@ -0,0 +1,30 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: MIPS: fix cache flushing for highmem pages
+
+Most cache flush ops were no-ops for highmem pages. This led to nasty
+segfaults and (in the case of page_address(page) == NULL) kernel
+crashes.
+
+Fix this by always flushing highmem pages using kmap/kunmap_atomic
+around the actual cache flush. This might be a bit inefficient, but at
+least it's stable.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -116,6 +116,13 @@ void __flush_anon_page(struct page *page
+ {
+ unsigned long addr = (unsigned long) page_address(page);
+
++ if (PageHighMem(page)) {
++ addr = (unsigned long)kmap_atomic(page);
++ flush_data_cache_page(addr);
++ __kunmap_atomic((void *)addr);
++ return;
++ }
++
+ if (pages_do_alias(addr, vmaddr)) {
+ if (page_mapcount(page) && !Page_dcache_dirty(page)) {
+ void *kaddr;
diff --git a/target/linux/generic/pending-4.14/110-ehci_hcd_ignore_oc.patch b/target/linux/generic/pending-4.14/110-ehci_hcd_ignore_oc.patch
new file mode 100644
index 0000000..b45b1c0
--- /dev/null
+++ b/target/linux/generic/pending-4.14/110-ehci_hcd_ignore_oc.patch
@@ -0,0 +1,79 @@
+From: Florian Fainelli <florian@openwrt.org>
+Subject: USB: EHCI: add ignore_oc flag to disable overcurrent checking
+
+This patch adds an ignore_oc flag which can be set by EHCI controller
+drivers that do not support overcurrent checking or want it disabled. The
+EHCI platform data in include/linux/usb/ehci_pdriver.h is also augmented
+to take advantage of this new flag.
+
+Signed-off-by: Florian Fainelli <florian@openwrt.org>
+---
+ drivers/usb/host/ehci-hcd.c | 2 +-
+ drivers/usb/host/ehci-hub.c | 4 ++--
+ drivers/usb/host/ehci-platform.c | 1 +
+ drivers/usb/host/ehci.h | 1 +
+ include/linux/usb/ehci_pdriver.h | 1 +
+ 5 files changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -651,7 +651,7 @@ static int ehci_run (struct usb_hcd *hcd
+ "USB %x.%x started, EHCI %x.%02x%s\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
+ temp >> 8, temp & 0xff,
+- ignore_oc ? ", overcurrent ignored" : "");
++ (ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : "");
+
+ ehci_writel(ehci, INTR_MASK,
+ &ehci->regs->intr_enable); /* Turn On Interrupts */
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -646,7 +646,7 @@ ehci_hub_status_data (struct usb_hcd *hc
+ * always set, seem to clear PORT_OCC and PORT_CSC when writing to
+ * PORT_POWER; that's surprising, but maybe within-spec.
+ */
+- if (!ignore_oc)
++ if (!ignore_oc && !ehci->ignore_oc)
+ mask = PORT_CSC | PORT_PEC | PORT_OCC;
+ else
+ mask = PORT_CSC | PORT_PEC;
+@@ -1016,7 +1016,7 @@ int ehci_hub_control(
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+
+- if ((temp & PORT_OCC) && !ignore_oc){
++ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)){
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+ /*
+--- a/drivers/usb/host/ehci-platform.c
++++ b/drivers/usb/host/ehci-platform.c
+@@ -263,6 +263,8 @@ static int ehci_platform_probe(struct pl
+ hcd->has_tt = 1;
+ if (pdata->reset_on_resume)
+ priv->reset_on_resume = true;
++ if (pdata->ignore_oc)
++ ehci->ignore_oc = 1;
+
+ #ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+ if (ehci->big_endian_mmio) {
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -231,6 +231,7 @@ struct ehci_hcd { /* one per controlle
+ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
+ unsigned need_oc_pp_cycle:1; /* MPC834X port power */
+ unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
++ unsigned ignore_oc:1;
+
+ /* required for usb32 quirk */
+ #define OHCI_CTRL_HCFS (3 << 6)
+--- a/include/linux/usb/ehci_pdriver.h
++++ b/include/linux/usb/ehci_pdriver.h
+@@ -49,6 +49,7 @@ struct usb_ehci_pdata {
+ unsigned no_io_watchdog:1;
+ unsigned reset_on_resume:1;
+ unsigned dma_mask_64:1;
++ unsigned ignore_oc:1;
+
+ /* Turn on all power and clocks */
+ int (*power_on)(struct platform_device *pdev);
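As a side note, a minimal sketch of how board support code could consume the
new flag through the generic "ehci-platform" driver; the example_* names are
hypothetical and not part of this series:

#include <linux/platform_device.h>
#include <linux/usb/ehci_pdriver.h>

static struct usb_ehci_pdata example_ehci_pdata = {
	.ignore_oc	= 1,	/* board has no usable overcurrent signal */
};

static struct platform_device example_ehci_device = {
	.name	= "ehci-platform",
	.id	= -1,
	.dev	= {
		.platform_data = &example_ehci_pdata,
	},
};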
diff --git a/target/linux/generic/pending-4.14/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch b/target/linux/generic/pending-4.14/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch
new file mode 100644
index 0000000..fbf9981
--- /dev/null
+++ b/target/linux/generic/pending-4.14/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch
@@ -0,0 +1,82 @@
+From: Tobias Wolf <dev-NTEO@vplace.de>
+Subject: mm: Fix alloc_node_mem_map with ARCH_PFN_OFFSET calculation
+
+An rt288x (ralink) based router (Belkin F5D8235 v1) does not boot with any
+kernel beyond version 4.3 resulting in:
+
+BUG: Bad page state in process swapper pfn:086ac
+
+bisect resulted in:
+
+a1c34a3bf00af2cede839879502e12dc68491ad5 is the first bad commit
+commit a1c34a3bf00af2cede839879502e12dc68491ad5
+Author: Laura Abbott <laura@labbott.name>
+Date: Thu Nov 5 18:48:46 2015 -0800
+
+ mm: Don't offset memmap for flatmem
+
+ Srinivas Kandagatla reported bad page messages when trying to remove the
+ bottom 2MB on an ARM based IFC6410 board
+
+ BUG: Bad page state in process swapper pfn:fffa8
+ page:ef7fb500 count:0 mapcount:0 mapping: (null) index:0x0
+ flags: 0x96640253(locked|error|dirty|active|arch_1|reclaim|mlocked)
+ page dumped because: PAGE_FLAGS_CHECK_AT_FREE flag(s) set
+ bad because of flags:
+ flags: 0x200041(locked|active|mlocked)
+ Modules linked in:
+ CPU: 0 PID: 0 Comm: swapper Not tainted 3.19.0-rc3-00007-g412f9ba-dirty
+#816
+ Hardware name: Qualcomm (Flattened Device Tree)
+ unwind_backtrace
+ show_stack
+ dump_stack
+ bad_page
+ free_pages_prepare
+ free_hot_cold_page
+ __free_pages
+ free_highmem_page
+ mem_init
+ start_kernel
+ Disabling lock debugging due to kernel taint
+ [...]
+:040000 040000 2de013c372345fd471cd58f0553c9b38b0ef1cc4
+0a8156f848733dfa21e16c196dfb6c0a76290709 M mm
+
+This fix for ARM no longer accounts for ARCH_PFN_OFFSET in mem_map, which is
+later used by page_to_pfn.
+
+The following output was generated with two hacked in printk statements:
+
+printk("before %p vs. %p or %p\n", mem_map, mem_map - offset, mem_map -
+(pgdat->node_start_pfn - ARCH_PFN_OFFSET));
+ if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
+ mem_map -= offset + (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
+printk("after %p\n", mem_map);
+
+Output:
+
+[ 0.000000] before 8861b280 vs. 8861b280 or 8851b280
+[ 0.000000] after 8851b280
+
+As seen in the first line, mem_map with offset subtracted does not equal
+mem_map with (node_start_pfn - ARCH_PFN_OFFSET) subtracted.
+
+After additionally subtracting (node_start_pfn - ARCH_PFN_OFFSET) from mem_map
+(the previously calculated offset is zero for the named platform), the board
+is able to boot 4.4 and 4.9-rc7 again.
+
+Signed-off-by: Tobias Wolf <dev-NTEO@vplace.de>
+---
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6170,7 +6170,7 @@ static void __ref alloc_node_mem_map(str
+ mem_map = NODE_DATA(0)->node_mem_map;
+ #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
+ if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
+- mem_map -= offset;
++ mem_map -= offset + (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
+ #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+ }
+ #endif
diff --git a/target/linux/generic/pending-4.14/130-add-linux-spidev-compatible-si3210.patch b/target/linux/generic/pending-4.14/130-add-linux-spidev-compatible-si3210.patch
new file mode 100644
index 0000000..b00fb8e
--- /dev/null
+++ b/target/linux/generic/pending-4.14/130-add-linux-spidev-compatible-si3210.patch
@@ -0,0 +1,18 @@
+From: Giuseppe Lippolis <giu.lippolis@gmail.com>
+Subject: Add the linux,spidev compatible in spidev. Several devices in ramips have this binding in the dts.
+
+Signed-off-by: Giuseppe Lippolis <giu.lippolis@gmail.com>
+---
+ drivers/spi/spidev.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -669,6 +669,7 @@ static const struct of_device_id spidev_
+ { .compatible = "lineartechnology,ltc2488" },
+ { .compatible = "ge,achc" },
+ { .compatible = "semtech,sx1301" },
++ { .compatible = "siliconlabs,si3210" },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, spidev_dt_ids);
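For context, a device tree fragment of the kind the description refers to
would now bind through the new entry (illustrative only; the controller
label, chip select and clock rate are assumptions):

&spi1 {
	slic@0 {
		compatible = "siliconlabs,si3210";
		reg = <0>;
		spi-max-frequency = <1000000>;
	};
};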
diff --git a/target/linux/generic/pending-4.14/131-spi-use-gpio_set_value_cansleep-for-setting-chipsele.patch b/target/linux/generic/pending-4.14/131-spi-use-gpio_set_value_cansleep-for-setting-chipsele.patch
new file mode 100644
index 0000000..9603385
--- /dev/null
+++ b/target/linux/generic/pending-4.14/131-spi-use-gpio_set_value_cansleep-for-setting-chipsele.patch
@@ -0,0 +1,20 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: spi: use gpio_set_value_cansleep for setting chipselect GPIO
+
+Sleeping is safe inside spi_transfer_one_message, and some GPIO chips
+need to sleep for setting values
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -729,7 +729,7 @@ static void spi_set_cs(struct spi_device
+ enable = !enable;
+
+ if (gpio_is_valid(spi->cs_gpio)) {
+- gpio_set_value(spi->cs_gpio, !enable);
++ gpio_set_value_cansleep(spi->cs_gpio, !enable);
+ /* Some SPI masters need both GPIO CS & slave_select */
+ if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
+ spi->controller->set_cs)
diff --git a/target/linux/generic/pending-4.14/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch b/target/linux/generic/pending-4.14/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch
new file mode 100644
index 0000000..b9bb3f7
--- /dev/null
+++ b/target/linux/generic/pending-4.14/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch
@@ -0,0 +1,62 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: jffs2: use .rename2 and add RENAME_WHITEOUT support
+
+It is required for renames on overlayfs
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -756,6 +756,24 @@ static int jffs2_mknod (struct inode *di
+ return ret;
+ }
+
++static int jffs2_whiteout (struct inode *old_dir, struct dentry *old_dentry)
++{
++ struct dentry *wh;
++ int err;
++
++ wh = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
++ if (!wh)
++ return -ENOMEM;
++
++ err = jffs2_mknod(old_dir, wh, S_IFCHR | WHITEOUT_MODE,
++ WHITEOUT_DEV);
++ if (err)
++ return err;
++
++ d_rehash(wh);
++ return 0;
++}
++
+ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
+ struct inode *new_dir_i, struct dentry *new_dentry,
+ unsigned int flags)
+@@ -766,7 +784,7 @@ static int jffs2_rename (struct inode *o
+ uint8_t type;
+ uint32_t now;
+
+- if (flags & ~RENAME_NOREPLACE)
++ if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT))
+ return -EINVAL;
+
+ /* The VFS will check for us and prevent trying to rename a
+@@ -832,9 +850,14 @@ static int jffs2_rename (struct inode *o
+ if (d_is_dir(old_dentry) && !victim_f)
+ inc_nlink(new_dir_i);
+
+- /* Unlink the original */
+- ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
+- old_dentry->d_name.name, old_dentry->d_name.len, NULL, now);
++ if (flags & RENAME_WHITEOUT)
++ /* Replace with whiteout */
++ ret = jffs2_whiteout(old_dir_i, old_dentry);
++ else
++ /* Unlink the original */
++ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
++ old_dentry->d_name.name,
++ old_dentry->d_name.len, NULL, now);
+
+ /* We don't touch inode->i_nlink */
+
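A small userspace sketch of what this enables (not part of the series):
overlayfs issues the equivalent internally, and the flag can also be
exercised directly via renameat2(2) on a jffs2 mount, given CAP_MKNOD for the
whiteout node and a libc that exposes SYS_renameat2:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_WHITEOUT
#define RENAME_WHITEOUT (1 << 2)	/* matches the uapi definition in <linux/fs.h> */
#endif

int main(void)
{
	/* Move "old" to "new" and leave a whiteout behind at "old". */
	if (syscall(SYS_renameat2, AT_FDCWD, "old", AT_FDCWD, "new",
		    RENAME_WHITEOUT) < 0) {
		perror("renameat2");
		return 1;
	}
	return 0;
}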
diff --git a/target/linux/generic/pending-4.14/141-jffs2-add-RENAME_EXCHANGE-support.patch b/target/linux/generic/pending-4.14/141-jffs2-add-RENAME_EXCHANGE-support.patch
new file mode 100644
index 0000000..4b30bc7
--- /dev/null
+++ b/target/linux/generic/pending-4.14/141-jffs2-add-RENAME_EXCHANGE-support.patch
@@ -0,0 +1,73 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: jffs2: add RENAME_EXCHANGE support
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -781,18 +781,31 @@ static int jffs2_rename (struct inode *o
+ int ret;
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
+ struct jffs2_inode_info *victim_f = NULL;
++ struct inode *fst_inode = d_inode(old_dentry);
++ struct inode *snd_inode = d_inode(new_dentry);
+ uint8_t type;
+ uint32_t now;
+
+- if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT))
++ if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT|RENAME_EXCHANGE))
+ return -EINVAL;
+
++ if ((flags & RENAME_EXCHANGE) && (old_dir_i != new_dir_i)) {
++ if (S_ISDIR(fst_inode->i_mode) && !S_ISDIR(snd_inode->i_mode)) {
++ inc_nlink(new_dir_i);
++ drop_nlink(old_dir_i);
++ }
++ else if (!S_ISDIR(fst_inode->i_mode) && S_ISDIR(snd_inode->i_mode)) {
++ drop_nlink(new_dir_i);
++ inc_nlink(old_dir_i);
++ }
++ }
++
+ /* The VFS will check for us and prevent trying to rename a
+ * file over a directory and vice versa, but if it's a directory,
+ * the VFS can't check whether the victim is empty. The filesystem
+ * needs to do that for itself.
+ */
+- if (d_really_is_positive(new_dentry)) {
++ if (d_really_is_positive(new_dentry) && !(flags & RENAME_EXCHANGE)) {
+ victim_f = JFFS2_INODE_INFO(d_inode(new_dentry));
+ if (d_is_dir(new_dentry)) {
+ struct jffs2_full_dirent *fd;
+@@ -827,7 +840,7 @@ static int jffs2_rename (struct inode *o
+ if (ret)
+ return ret;
+
+- if (victim_f) {
++ if (victim_f && !(flags & RENAME_EXCHANGE)) {
+ /* There was a victim. Kill it off nicely */
+ if (d_is_dir(new_dentry))
+ clear_nlink(d_inode(new_dentry));
+@@ -853,6 +866,12 @@ static int jffs2_rename (struct inode *o
+ if (flags & RENAME_WHITEOUT)
+ /* Replace with whiteout */
+ ret = jffs2_whiteout(old_dir_i, old_dentry);
++ else if (flags & RENAME_EXCHANGE)
++ /* Replace the original */
++ ret = jffs2_do_link(c, JFFS2_INODE_INFO(old_dir_i),
++ d_inode(new_dentry)->i_ino, type,
++ old_dentry->d_name.name, old_dentry->d_name.len,
++ now);
+ else
+ /* Unlink the original */
+ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
+@@ -884,7 +903,7 @@ static int jffs2_rename (struct inode *o
+ return ret;
+ }
+
+- if (d_is_dir(old_dentry))
++ if (d_is_dir(old_dentry) && !(flags & RENAME_EXCHANGE))
+ drop_nlink(old_dir_i);
+
+ new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
diff --git a/target/linux/generic/pending-4.14/150-bridge_allow_receiption_on_disabled_port.patch b/target/linux/generic/pending-4.14/150-bridge_allow_receiption_on_disabled_port.patch
new file mode 100644
index 0000000..0326731
--- /dev/null
+++ b/target/linux/generic/pending-4.14/150-bridge_allow_receiption_on_disabled_port.patch
@@ -0,0 +1,43 @@
+From: Stephen Hemminger <stephen@networkplumber.org>
+Subject: bridge: allow reception on disabled port
+
+When an ethernet device is enslaved to a bridge, and the bridge STP
+detects loss of carrier (or operational state down), then normally
+packet reception is blocked.
+
+This breaks control applications like WPA which may be expecting to
+receive packets to negotiate bringing the link up. The bridge needs to
+block forwarding packets from these disabled ports, but there is no
+hard requirement to not allow local packet delivery.
+
+Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -238,7 +238,8 @@ static int br_handle_local_finish(struct
+ {
+ struct net_bridge_port *p = br_port_get_rcu(skb->dev);
+
+- __br_handle_local_finish(skb);
++ if (p->state != BR_STATE_DISABLED)
++ __br_handle_local_finish(skb);
+
+ BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
+ br_pass_frame_up(skb);
+@@ -326,6 +327,15 @@ rx_handler_result_t br_handle_frame(stru
+
+ forward:
+ switch (p->state) {
++ case BR_STATE_DISABLED:
++ if (ether_addr_equal(p->br->dev->dev_addr, dest))
++ skb->pkt_type = PACKET_HOST;
++
++ NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
++ dev_net(skb->dev), NULL, skb, skb->dev, NULL,
++ br_handle_local_finish);
++ break;
++
+ case BR_STATE_FORWARDING:
+ rhook = rcu_dereference(br_should_route_hook);
+ if (rhook) {
diff --git a/target/linux/generic/pending-4.14/160-mtd-part-add-generic-parsing-of-linux-part-probe.patch b/target/linux/generic/pending-4.14/160-mtd-part-add-generic-parsing-of-linux-part-probe.patch
new file mode 100644
index 0000000..2706f13
--- /dev/null
+++ b/target/linux/generic/pending-4.14/160-mtd-part-add-generic-parsing-of-linux-part-probe.patch
@@ -0,0 +1,161 @@
+From: Hauke Mehrtens <hauke@hauke-m.de>
+Subject: mtd: part: add generic parsing of linux,part-probe
+
+This moves the linux,part-probe device tree parsing code from
+physmap_of.c to mtdpart.c. Now all drivers can use this feature by just
+providing a reference to their device tree node in struct
+mtd_part_parser_data.
+
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+---
+ Documentation/devicetree/bindings/mtd/nand.txt | 16 +++++++++
+ drivers/mtd/maps/physmap_of.c | 46 +-------------------------
+ drivers/mtd/mtdpart.c | 45 +++++++++++++++++++++++++
+ 3 files changed, 62 insertions(+), 45 deletions(-)
+
+--- a/Documentation/devicetree/bindings/mtd/nand.txt
++++ b/Documentation/devicetree/bindings/mtd/nand.txt
+@@ -44,6 +44,22 @@ Optional NAND chip properties:
+ used by the upper layers, and you want to make your NAND
+ as reliable as possible.
+
++- linux,part-probe: list of name as strings of the partition parser
++ which should be used to parse the partition table.
++ They will be tried in the specified ordering and
++ the next one will be used if the previous one
++ failed.
++
++ Example: linux,part-probe = "cmdlinepart", "ofpart";
++
++ This is also the default value, which will be used
++ if this attribute is not specified. It could be
++ that the flash driver in use overwrote the default
++ value and uses some other default.
++
++ Possible values are: bcm47xxpart, afs, ar7part,
++ ofoldpart, ofpart, bcm63xxpart, RedBoot, cmdlinepart
++
+ The ECC strength and ECC step size properties define the correction capability
+ of a controller. Together, they say a controller can correct "{strength} bit
+ errors per {size} bytes".
+--- a/drivers/mtd/maps/physmap_of_core.c
++++ b/drivers/mtd/maps/physmap_of_core.c
+@@ -114,37 +114,9 @@ static struct mtd_info *obsolete_probe(s
+ static const char * const part_probe_types_def[] = {
+ "cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL };
+
+-static const char * const *of_get_probes(struct device_node *dp)
+-{
+- const char **res;
+- int count;
+-
+- count = of_property_count_strings(dp, "linux,part-probe");
+- if (count < 0)
+- return part_probe_types_def;
+-
+- res = kzalloc((count + 1) * sizeof(*res), GFP_KERNEL);
+- if (!res)
+- return NULL;
+-
+- count = of_property_read_string_array(dp, "linux,part-probe", res,
+- count);
+- if (count < 0)
+- return NULL;
+-
+- return res;
+-}
+-
+-static void of_free_probes(const char * const *probes)
+-{
+- if (probes != part_probe_types_def)
+- kfree(probes);
+-}
+-
+ static const struct of_device_id of_flash_match[];
+ static int of_flash_probe(struct platform_device *dev)
+ {
+- const char * const *part_probe_types;
+ const struct of_device_id *match;
+ struct device_node *dp = dev->dev.of_node;
+ struct resource res;
+@@ -310,14 +282,8 @@ static int of_flash_probe(struct platfor
+
+ info->cmtd->dev.parent = &dev->dev;
+ mtd_set_of_node(info->cmtd, dp);
+- part_probe_types = of_get_probes(dp);
+- if (!part_probe_types) {
+- err = -ENOMEM;
+- goto err_out;
+- }
+- mtd_device_parse_register(info->cmtd, part_probe_types, NULL,
++ mtd_device_parse_register(info->cmtd, part_probe_types_def, NULL,
+ NULL, 0);
+- of_free_probes(part_probe_types);
+
+ kfree(mtd_list);
+
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -29,6 +29,7 @@
+ #include <linux/kmod.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
++#include <linux/of.h>
+ #include <linux/err.h>
+
+ #include "mtdcore.h"
+@@ -863,6 +864,32 @@ void deregister_mtd_parser(struct mtd_pa
+ EXPORT_SYMBOL_GPL(deregister_mtd_parser);
+
+ /*
++ * Parses the linux,part-probe device tree property.
++ * When a non null value is returned it has to be freed with kfree() by
++ * the caller.
++ */
++static const char * const *of_get_probes(struct device_node *dp)
++{
++ const char **res;
++ int count;
++
++ count = of_property_count_strings(dp, "linux,part-probe");
++ if (count < 0)
++ return NULL;
++
++ res = kzalloc((count + 1) * sizeof(*res), GFP_KERNEL);
++ if (!res)
++ return NULL;
++
++ count = of_property_read_string_array(dp, "linux,part-probe", res,
++ count);
++ if (count < 0)
++ return NULL;
++
++ return res;
++}
++
++/*
+ * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
+ * are changing this array!
+ */
+@@ -920,6 +947,13 @@ int parse_mtd_partitions(struct mtd_info
+ {
+ struct mtd_part_parser *parser;
+ int ret, err = 0;
++ const char *const *types_of = NULL;
++
++ if (mtd_get_of_node(master)) {
++ types_of = of_get_probes(mtd_get_of_node(master));
++ if (types_of != NULL)
++ types = types_of;
++ }
+
+ if (!types)
+ types = default_mtd_part_types;
+@@ -945,6 +979,7 @@ int parse_mtd_partitions(struct mtd_info
+ if (ret < 0 && !err)
+ err = ret;
+ }
++ kfree(types_of);
+ return err;
+ }
+
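A sketch of what an individual flash driver now needs in order to pick up
linux,part-probe from its node (the example_* names are hypothetical; the
mtd_* calls are the generic ones used in the patch context above):

#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>

struct mtd_info *example_setup_mtd(struct platform_device *pdev);	/* hypothetical driver setup */

static int example_flash_probe(struct platform_device *pdev)
{
	struct mtd_info *mtd = example_setup_mtd(pdev);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	/* Attach the OF node; parse_mtd_partitions() now reads linux,part-probe from it. */
	mtd_set_of_node(mtd, pdev->dev.of_node);

	/* NULL parser list: the DT property, or the built-in defaults, decide. */
	return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
}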
diff --git a/target/linux/generic/pending-4.14/180-net-phy-at803x-add-support-for-AT8032.patch b/target/linux/generic/pending-4.14/180-net-phy-at803x-add-support-for-AT8032.patch
new file mode 100644
index 0000000..3b1a41a
--- /dev/null
+++ b/target/linux/generic/pending-4.14/180-net-phy-at803x-add-support-for-AT8032.patch
@@ -0,0 +1,70 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: net: phy: at803x: add support for AT8032
+
+Like AT8030, this PHY needs the GPIO reset workaround
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -62,6 +62,7 @@
+
+ #define ATH8030_PHY_ID 0x004dd076
+ #define ATH8031_PHY_ID 0x004dd074
++#define ATH8032_PHY_ID 0x004dd023
+ #define ATH8035_PHY_ID 0x004dd072
+ #define AT803X_PHY_ID_MASK 0xffffffef
+
+@@ -260,7 +261,8 @@ static int at803x_probe(struct phy_devic
+ if (!priv)
+ return -ENOMEM;
+
+- if (phydev->drv->phy_id != ATH8030_PHY_ID)
++ if (phydev->drv->phy_id != ATH8030_PHY_ID &&
++ phydev->drv->phy_id != ATH8032_PHY_ID)
+ goto does_not_require_reset_workaround;
+
+ gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+@@ -336,7 +338,7 @@ static void at803x_link_change_notify(st
+ struct at803x_priv *priv = phydev->priv;
+
+ /*
+- * Conduct a hardware reset for AT8030 every time a link loss is
++ * Conduct a hardware reset for AT8030/2 every time a link loss is
+ * signalled. This is necessary to circumvent a hardware bug that
+ * occurs when the cable is unplugged while TX packets are pending
+ * in the FIFO. In such cases, the FIFO enters an error mode it
+@@ -448,6 +450,24 @@ static struct phy_driver at803x_driver[]
+ .aneg_done = at803x_aneg_done,
+ .ack_interrupt = &at803x_ack_interrupt,
+ .config_intr = &at803x_config_intr,
++}, {
++ /* ATHEROS 8032 */
++ .phy_id = ATH8032_PHY_ID,
++ .name = "Atheros 8032 ethernet",
++ .phy_id_mask = 0xffffffef,
++ .probe = at803x_probe,
++ .config_init = at803x_config_init,
++ .link_change_notify = at803x_link_change_notify,
++ .set_wol = at803x_set_wol,
++ .get_wol = at803x_get_wol,
++ .suspend = at803x_suspend,
++ .resume = at803x_resume,
++ .features = PHY_BASIC_FEATURES,
++ .flags = PHY_HAS_INTERRUPT,
++ .config_aneg = genphy_config_aneg,
++ .read_status = genphy_read_status,
++ .ack_interrupt = at803x_ack_interrupt,
++ .config_intr = at803x_config_intr,
+ } };
+
+ module_phy_driver(at803x_driver);
+@@ -455,6 +475,7 @@ module_phy_driver(at803x_driver);
+ static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+ { ATH8030_PHY_ID, AT803X_PHY_ID_MASK },
+ { ATH8031_PHY_ID, AT803X_PHY_ID_MASK },
++ { ATH8032_PHY_ID, AT803X_PHY_ID_MASK },
+ { ATH8035_PHY_ID, AT803X_PHY_ID_MASK },
+ { }
+ };
diff --git a/target/linux/generic/pending-4.14/190-2-5-e1000e-Fix-wrong-comment-related-to-link-detection.patch b/target/linux/generic/pending-4.14/190-2-5-e1000e-Fix-wrong-comment-related-to-link-detection.patch
new file mode 100644
index 0000000..d8aea32
--- /dev/null
+++ b/target/linux/generic/pending-4.14/190-2-5-e1000e-Fix-wrong-comment-related-to-link-detection.patch
@@ -0,0 +1,43 @@
+From patchwork Fri Jul 21 18:36:24 2017
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [2/5] e1000e: Fix wrong comment related to link detection
+From: Benjamin Poirier <bpoirier@suse.com>
+X-Patchwork-Id: 9857489
+Message-Id: <20170721183627.13373-2-bpoirier@suse.com>
+To: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Cc: Lennart Sorensen <lsorense@csclub.uwaterloo.ca>,
+ intel-wired-lan@lists.osuosl.org, netdev@vger.kernel.org,
+ linux-kernel@vger.kernel.org
+Date: Fri, 21 Jul 2017 11:36:24 -0700
+
+Reading e1000e_check_for_copper_link() shows that get_link_status is set to
+false after link has been detected. Therefore, it stays TRUE until then.
+
+Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+---
+ drivers/net/ethernet/intel/e1000e/netdev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5096,7 +5096,7 @@ static bool e1000e_has_link(struct e1000
+
+ /* get_link_status is set on LSC (link status) interrupt or
+ * Rx sequence error interrupt. get_link_status will stay
+- * false until the check_for_link establishes link
++ * true until the check_for_link establishes link
+ * for copper adapters ONLY
+ */
+ switch (hw->phy.media_type) {
+@@ -5114,7 +5114,7 @@ static bool e1000e_has_link(struct e1000
+ break;
+ case e1000_media_type_internal_serdes:
+ ret_val = hw->mac.ops.check_for_link(hw);
+- link_active = adapter->hw.mac.serdes_has_link;
++ link_active = hw->mac.serdes_has_link;
+ break;
+ default:
+ case e1000_media_type_unknown:
diff --git a/target/linux/generic/pending-4.14/201-extra_optimization.patch b/target/linux/generic/pending-4.14/201-extra_optimization.patch
new file mode 100644
index 0000000..7188375
--- /dev/null
+++ b/target/linux/generic/pending-4.14/201-extra_optimization.patch
@@ -0,0 +1,32 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: Upgrade to Linux 2.6.19
+
+- Includes large parts of the patch from #1021 by dpalffy
+- Includes RB532 NAND driver changes by n0-1
+
+[john@phrozen.org: felix will add this to his upstream queue]
+
+lede-commit: bff468813f78f81e36ebb2a3f4354de7365e640f
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ Makefile | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/Makefile
++++ b/Makefile
+@@ -637,12 +637,12 @@ KBUILD_CFLAGS += $(call cc-disable-warni
+
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+ KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
+-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION)
+ else
+ ifdef CONFIG_PROFILE_ALL_BRANCHES
+-KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION)
+ else
+-KBUILD_CFLAGS += -O2
++KBUILD_CFLAGS += -O2 -fno-reorder-blocks -fno-tree-ch $(EXTRA_OPTIMIZATION)
+ endif
+ endif
+
diff --git a/target/linux/generic/pending-4.14/203-kallsyms_uncompressed.patch b/target/linux/generic/pending-4.14/203-kallsyms_uncompressed.patch
new file mode 100644
index 0000000..fa68f30
--- /dev/null
+++ b/target/linux/generic/pending-4.14/203-kallsyms_uncompressed.patch
@@ -0,0 +1,119 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add a config option for keeping the kallsyms table uncompressed, saving ~9kb kernel size after lzma on ar71xx
+
+[john@phrozen.org: added to my upstream queue 30.12.2016]
+lede-commit: e0e3509b5ce2ccf93d4d67ea907613f5f7ec2eed
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ init/Kconfig | 11 +++++++++++
+ kernel/kallsyms.c | 8 ++++++++
+ scripts/kallsyms.c | 12 ++++++++++++
+ scripts/link-vmlinux.sh | 4 ++++
+ 4 files changed, 35 insertions(+)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1081,6 +1081,17 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
+ the unaligned access emulation.
+ see arch/parisc/kernel/unaligned.c for reference
+
++config KALLSYMS_UNCOMPRESSED
++ bool "Keep kallsyms uncompressed"
++ depends on KALLSYMS
++ help
++ Normally kallsyms contains compressed symbols (using a token table),
++ reducing the uncompressed kernel image size. Keeping the symbol table
++ uncompressed significantly improves the size of this part in compressed
++ kernel images.
++
++ Say N unless you need compressed kernel images to be small.
++
+ config HAVE_PCSPKR_PLATFORM
+ bool
+
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -108,6 +108,11 @@ static unsigned int kallsyms_expand_symb
+ * For every byte on the compressed symbol data, copy the table
+ * entry for that byte.
+ */
++#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
++ memcpy(result, data + 1, len - 1);
++ result += len - 1;
++ len = 0;
++#endif
+ while (len) {
+ tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
+ data++;
+@@ -140,6 +145,9 @@ tail:
+ */
+ static char kallsyms_get_symbol_type(unsigned int off)
+ {
++#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
++ return kallsyms_names[off + 1];
++#endif
+ /*
+ * Get just the first code, look it up in the token table,
+ * and return the first char from this token.
+--- a/scripts/kallsyms.c
++++ b/scripts/kallsyms.c
+@@ -61,6 +61,7 @@ static struct addr_range percpu_range =
+ static struct sym_entry *table;
+ static unsigned int table_size, table_cnt;
+ static int all_symbols = 0;
++static int uncompressed = 0;
+ static int absolute_percpu = 0;
+ static char symbol_prefix_char = '\0';
+ static int base_relative = 0;
+@@ -457,6 +458,9 @@ static void write_src(void)
+
+ free(markers);
+
++ if (uncompressed)
++ return;
++
+ output_label("kallsyms_token_table");
+ off = 0;
+ for (i = 0; i < 256; i++) {
+@@ -515,6 +519,9 @@ static void *find_token(unsigned char *s
+ {
+ int i;
+
++ if (uncompressed)
++ return NULL;
++
+ for (i = 0; i < len - 1; i++) {
+ if (str[i] == token[0] && str[i+1] == token[1])
+ return &str[i];
+@@ -587,6 +594,9 @@ static void optimize_result(void)
+ {
+ int i, best;
+
++ if (uncompressed)
++ return;
++
+ /* using the '\0' symbol last allows compress_symbols to use standard
+ * fast string functions */
+ for (i = 255; i >= 0; i--) {
+@@ -775,6 +785,8 @@ int main(int argc, char **argv)
+ symbol_prefix_char = *p;
+ } else if (strcmp(argv[i], "--base-relative") == 0)
+ base_relative = 1;
++ else if (strcmp(argv[i], "--uncompressed") == 0)
++ uncompressed = 1;
+ else
+ usage();
+ }
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -164,6 +164,10 @@ kallsyms()
+ kallsymopt="${kallsymopt} --base-relative"
+ fi
+
++ if [ -n "${CONFIG_KALLSYMS_UNCOMPRESSED}" ]; then
++ kallsymopt="${kallsymopt} --uncompressed"
++ fi
++
+ local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
+ ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
+
diff --git a/target/linux/generic/pending-4.14/205-backtrace_module_info.patch b/target/linux/generic/pending-4.14/205-backtrace_module_info.patch
new file mode 100644
index 0000000..4040f91
--- /dev/null
+++ b/target/linux/generic/pending-4.14/205-backtrace_module_info.patch
@@ -0,0 +1,45 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: when KALLSYMS is disabled, print module address + size for matching backtrace entries
+
+[john@phrozen.org: felix will add this to his upstream queue]
+
+lede-commit 53827cdc824556cda910b23ce5030c363b8f1461
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ lib/vsprintf.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -670,8 +670,10 @@ char *symbol_string(char *buf, char *end
+ struct printf_spec spec, const char *fmt)
+ {
+ unsigned long value;
+-#ifdef CONFIG_KALLSYMS
+ char sym[KSYM_SYMBOL_LEN];
++#ifndef CONFIG_KALLSYMS
++ struct module *mod;
++ int len;
+ #endif
+
+ if (fmt[1] == 'R')
+@@ -685,11 +687,16 @@ char *symbol_string(char *buf, char *end
+ sprint_symbol(sym, value);
+ else
+ sprint_symbol_no_offset(sym, value);
+-
+- return string(buf, end, sym, spec);
+ #else
+- return special_hex_number(buf, end, value, sizeof(void *));
++ len = snprintf(sym, sizeof(sym), "0x%lx", value);
++
++ mod = __module_address(value);
++ if (mod)
++ snprintf(sym + len, sizeof(sym) - len, " [%s@%p+0x%x]",
++ mod->name, mod->core_layout.base,
++ mod->core_layout.size);
+ #endif
++ return string(buf, end, sym, spec);
+ }
+
+ static noinline_for_stack
diff --git a/target/linux/generic/pending-4.14/206-mips-disable-vdso.patch b/target/linux/generic/pending-4.14/206-mips-disable-vdso.patch
new file mode 100644
index 0000000..f3c58dc
--- /dev/null
+++ b/target/linux/generic/pending-4.14/206-mips-disable-vdso.patch
@@ -0,0 +1,23 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: disable MIPS VDSO by default until the cache issues have been resolved
+
+lede-commit: 1185e645a773c86aa88cf04d0e2911dc62eb43f5
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/vdso/Makefile | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/vdso/Makefile
++++ b/arch/mips/vdso/Makefile
+@@ -30,9 +30,9 @@ aflags-vdso := $(ccflags-vdso) \
+ ifndef CONFIG_CPU_MIPSR6
+ ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
+ $(warning MIPS VDSO requires binutils >= 2.25)
+- obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y))
+- ccflags-vdso += -DDISABLE_MIPS_VDSO
+ endif
++ obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y))
++ ccflags-vdso += -DDISABLE_MIPS_VDSO
+ endif
+
+ # VDSO linker flags.
diff --git a/target/linux/generic/pending-4.14/240-remove-unsane-filenames-from-deps_initramfs-list.patch b/target/linux/generic/pending-4.14/240-remove-unsane-filenames-from-deps_initramfs-list.patch
new file mode 100644
index 0000000..f440305
--- /dev/null
+++ b/target/linux/generic/pending-4.14/240-remove-unsane-filenames-from-deps_initramfs-list.patch
@@ -0,0 +1,46 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: usr: sanitize deps_initramfs list
+
+If any filename in the initramfs dependency
+list contains a colon, that causes a kernel
+build error like this:
+
+/devel/openwrt/build_dir/linux-ar71xx_generic/linux-3.6.6/usr/Makefile:58: *** multiple target patterns. Stop.
+make[5]: *** [usr] Error 2
+
+Fix it by removing such filenames from the
+deps_initramfs list.
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ usr/Makefile | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/usr/Makefile
++++ b/usr/Makefile
+@@ -39,20 +39,22 @@ ifneq ($(wildcard $(obj)/$(datafile_d_y)
+ include $(obj)/$(datafile_d_y)
+ endif
+
++deps_initramfs_sane := $(foreach v,$(deps_initramfs),$(if $(findstring :,$(v)),,$(v)))
++
+ quiet_cmd_initfs = GEN $@
+ cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
+
+ targets := $(datafile_y)
+
+ # do not try to update files included in initramfs
+-$(deps_initramfs): ;
++$(deps_initramfs_sane): ;
+
+-$(deps_initramfs): klibcdirs
++$(deps_initramfs_sane): klibcdirs
+ # We rebuild initramfs_data.cpio if:
+ # 1) Any included file is newer then initramfs_data.cpio
+ # 2) There are changes in which files are included (added or deleted)
+ # 3) If gen_init_cpio are newer than initramfs_data.cpio
+ # 4) arguments to gen_initramfs.sh changes
+-$(obj)/$(datafile_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
++$(obj)/$(datafile_y): $(obj)/gen_init_cpio $(deps_initramfs_sane) klibcdirs
+ $(Q)$(initramfs) -l $(ramfs-input) > $(obj)/$(datafile_d_y)
+ $(call if_changed,initfs)
diff --git a/target/linux/generic/pending-4.14/261-enable_wilink_platform_without_drivers.patch b/target/linux/generic/pending-4.14/261-enable_wilink_platform_without_drivers.patch
new file mode 100644
index 0000000..9955ab3
--- /dev/null
+++ b/target/linux/generic/pending-4.14/261-enable_wilink_platform_without_drivers.patch
@@ -0,0 +1,20 @@
+From: Imre Kaloz <kaloz@openwrt.org>
+Subject: [PATCH] hack: net: wireless: make the wl12xx glue code available with
+ compat-wireless, too
+
+Signed-off-by: Imre Kaloz <kaloz@openwrt.org>
+---
+ drivers/net/wireless/ti/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/ti/Kconfig
++++ b/drivers/net/wireless/ti/Kconfig
+@@ -19,7 +19,7 @@ source "drivers/net/wireless/ti/wlcore/K
+
+ config WILINK_PLATFORM_DATA
+ bool "TI WiLink platform data"
+- depends on WLCORE_SDIO || WL1251_SDIO
++ depends on WLCORE_SDIO || WL1251_SDIO || ARCH_OMAP2PLUS
+ default y
+ ---help---
+ Small platform data bit needed to pass data to the sdio modules.
diff --git a/target/linux/generic/pending-4.14/270-uapi-kernel.h-glibc-specific-inclusion-of-sysinfo.h.patch b/target/linux/generic/pending-4.14/270-uapi-kernel.h-glibc-specific-inclusion-of-sysinfo.h.patch
new file mode 100644
index 0000000..9ec8daf
--- /dev/null
+++ b/target/linux/generic/pending-4.14/270-uapi-kernel.h-glibc-specific-inclusion-of-sysinfo.h.patch
@@ -0,0 +1,32 @@
+From: David Heidelberger <david.heidelberger@ixit.cz>
+Subject: uapi/kernel.h: glibc specific inclusion of sysinfo.h
+
+Including sysinfo.h from kernel.h makes no sense whatsoever,
+but removing it breaks glibc's userspace header,
+which includes kernel.h instead of sysinfo.h from its sys/sysinfo.h.
+This seems to be a historical mistake.
+On musl, including any header that uses kernel.h directly or indirectly
+plus sys/sysinfo.h will produce a compile error due to redefinition of
+struct sysinfo from sys/sysinfo.h.
+So for now, only include it on glibc or when including from the kernel,
+in order not to break their headers.
+
+Signed-off-by: John Spencer <maillist-linux@barfooze.de>
+Signed-off-by: David Heidelberger <david.heidelberger@ixit.cz>
+Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+---
+ include/uapi/linux/kernel.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/uapi/linux/kernel.h
++++ b/include/uapi/linux/kernel.h
+@@ -2,7 +2,9 @@
+ #ifndef _UAPI_LINUX_KERNEL_H
+ #define _UAPI_LINUX_KERNEL_H
+
++#if defined(__KERNEL__) || defined( __GLIBC__)
+ #include <linux/sysinfo.h>
++#endif
+
+ /*
+ * 'kernel.h' contains some often-used function prototypes etc
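A minimal userspace reproducer for the musl breakage described above (not
part of the series; with the guard applied this pair of includes compiles
again):

#include <sys/sysinfo.h>	/* libc definition of struct sysinfo */
#include <linux/kernel.h>	/* previously pulled in linux/sysinfo.h unconditionally */

int main(void)
{
	struct sysinfo si;

	return sysinfo(&si) ? 1 : 0;
}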
diff --git a/target/linux/generic/pending-4.14/271-uapi-libc-compat.h-do-not-rely-on-__GLIBC__.patch b/target/linux/generic/pending-4.14/271-uapi-libc-compat.h-do-not-rely-on-__GLIBC__.patch
new file mode 100644
index 0000000..9ae148d
--- /dev/null
+++ b/target/linux/generic/pending-4.14/271-uapi-libc-compat.h-do-not-rely-on-__GLIBC__.patch
@@ -0,0 +1,105 @@
+From: David Heidelberger <david.heidelberger@ixit.cz>
+Subject: uapi/libc-compat.h: do not rely on __GLIBC__
+
+Musl provides the same structs as glibc, but does not provide a define to
+allow its detection. Since the absence of __GLIBC__ also can mean that it
+is included from the kernel, change the __GLIBC__ detection to
+!__KERNEL__, which should always be true when included from userspace.
+
+Signed-off-by: John Spencer <maillist-linux@barfooze.de>
+Tested-by: David Heidelberger <david.heidelberger@ixit.cz>
+Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+---
+ include/uapi/linux/libc-compat.h | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/include/uapi/linux/libc-compat.h
++++ b/include/uapi/linux/libc-compat.h
+@@ -49,13 +49,13 @@
+ #ifndef _UAPI_LIBC_COMPAT_H
+ #define _UAPI_LIBC_COMPAT_H
+
+-/* We have included glibc headers... */
+-#if defined(__GLIBC__)
++/* We have included libc headers... */
++#if !defined(__KERNEL__)
+
+-/* Coordinate with glibc net/if.h header. */
+-#if defined(_NET_IF_H) && defined(__USE_MISC)
++/* Coordinate with libc net/if.h header. */
++#if defined(_NET_IF_H) && (!defined(__GLIBC__) || defined(__USE_MISC))
+
+-/* GLIBC headers included first so don't define anything
++/* LIBC headers included first so don't define anything
+ * that would already be defined. */
+
+ #define __UAPI_DEF_IF_IFCONF 0
+@@ -66,7 +66,11 @@
+ #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
+ /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+ #ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
++#ifdef __GLIBC__
+ #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
++#else
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 0
++#endif
+ #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+
+ #else /* _NET_IF_H */
+@@ -86,10 +90,10 @@
+
+ #endif /* _NET_IF_H */
+
+-/* Coordinate with glibc netinet/in.h header. */
++/* Coordinate with libc netinet/in.h header. */
+ #if defined(_NETINET_IN_H)
+
+-/* GLIBC headers included first so don't define anything
++/* LIBC headers included first so don't define anything
+ * that would already be defined. */
+ #define __UAPI_DEF_IN_ADDR 0
+ #define __UAPI_DEF_IN_IPPROTO 0
+@@ -103,7 +107,7 @@
+ * if the glibc code didn't define them. This guard matches
+ * the guard in glibc/inet/netinet/in.h which defines the
+ * additional in6_addr macros e.g. s6_addr16, and s6_addr32. */
+-#if defined(__USE_MISC) || defined (__USE_GNU)
++#if !defined(__GLIBC__) || defined(__USE_MISC) || defined (__USE_GNU)
+ #define __UAPI_DEF_IN6_ADDR_ALT 0
+ #else
+ #define __UAPI_DEF_IN6_ADDR_ALT 1
+@@ -118,7 +122,7 @@
+ #else
+
+ /* Linux headers included first, and we must define everything
+- * we need. The expectation is that glibc will check the
++ * we need. The expectation is that the libc will check the
+ * __UAPI_DEF_* defines and adjust appropriately. */
+ #define __UAPI_DEF_IN_ADDR 1
+ #define __UAPI_DEF_IN_IPPROTO 1
+@@ -128,7 +132,7 @@
+ #define __UAPI_DEF_IN_CLASS 1
+
+ #define __UAPI_DEF_IN6_ADDR 1
+-/* We unconditionally define the in6_addr macros and glibc must
++/* We unconditionally define the in6_addr macros and the libc must
+ * coordinate. */
+ #define __UAPI_DEF_IN6_ADDR_ALT 1
+ #define __UAPI_DEF_SOCKADDR_IN6 1
+@@ -169,7 +173,7 @@
+ /* If we did not see any headers from any supported C libraries,
+ * or we are being included in the kernel, then define everything
+ * that we need. */
+-#else /* !defined(__GLIBC__) */
++#else /* defined(__KERNEL__) */
+
+ /* Definitions for if.h */
+ #define __UAPI_DEF_IF_IFCONF 1
+@@ -209,6 +213,6 @@
+ /* Definitions for xattr.h */
+ #define __UAPI_DEF_XATTR 1
+
+-#endif /* __GLIBC__ */
++#endif /* __KERNEL__ */
+
+ #endif /* _UAPI_LIBC_COMPAT_H */
diff --git a/target/linux/generic/pending-4.14/272-uapi-if_ether.h-prevent-redefinition-of-struct-ethhd.patch b/target/linux/generic/pending-4.14/272-uapi-if_ether.h-prevent-redefinition-of-struct-ethhd.patch
new file mode 100644
index 0000000..1dca073
--- /dev/null
+++ b/target/linux/generic/pending-4.14/272-uapi-if_ether.h-prevent-redefinition-of-struct-ethhd.patch
@@ -0,0 +1,65 @@
+From: David Heidelberger <david.heidelberger@ixit.cz>
+Subject: uapi/if_ether.h: prevent redefinition of struct ethhdr
+
+Musl provides its own ethhdr struct definition. Add a guard to prevent
+its redefinition if the appropriate musl header has already been included.
+
+Signed-off-by: John Spencer <maillist-linux@barfooze.de>
+Tested-by: David Heidelberger <david.heidelberger@ixit.cz>
+Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+---
+ include/uapi/linux/if_ether.h | 3 +++
+ include/uapi/linux/libc-compat.h | 11 +++++++++++
+ 2 files changed, 14 insertions(+)
+
+--- a/include/uapi/linux/if_ether.h
++++ b/include/uapi/linux/if_ether.h
+@@ -23,6 +23,7 @@
+ #define _UAPI_LINUX_IF_ETHER_H
+
+ #include <linux/types.h>
++#include <linux/libc-compat.h>
+
+ /*
+ * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
+@@ -149,11 +150,13 @@
+ * This is an Ethernet frame header.
+ */
+
++#if __UAPI_DEF_ETHHDR
+ struct ethhdr {
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+ unsigned char h_source[ETH_ALEN]; /* source ether addr */
+ __be16 h_proto; /* packet type ID field */
+ } __attribute__((packed));
++#endif
+
+
+ #endif /* _UAPI_LINUX_IF_ETHER_H */
+--- a/include/uapi/linux/libc-compat.h
++++ b/include/uapi/linux/libc-compat.h
+@@ -90,6 +90,14 @@
+
+ #endif /* _NET_IF_H */
+
++/* musl defines the ethhdr struct itself in its netinet/if_ether.h.
++ * Glibc just includes the kernel header and uses a different guard. */
++#if defined(_NETINET_IF_ETHER_H)
++#define __UAPI_DEF_ETHHDR 0
++#else
++#define __UAPI_DEF_ETHHDR 1
++#endif
++
+ /* Coordinate with libc netinet/in.h header. */
+ #if defined(_NETINET_IN_H)
+
+@@ -185,6 +193,9 @@
+ /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+ #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
++/* Definitions for if_ether.h */
++#define __UAPI_DEF_ETHHDR 1
++
+ /* Definitions for in.h */
+ #define __UAPI_DEF_IN_ADDR 1
+ #define __UAPI_DEF_IN_IPPROTO 1
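A minimal userspace reproducer for the conflict the new guard addresses (not
part of the series; on musl both headers used to define struct ethhdr):

#include <netinet/if_ether.h>	/* musl defines struct ethhdr here */
#include <linux/if_ether.h>	/* uapi definition, now guarded by __UAPI_DEF_ETHHDR */

int main(void)
{
	struct ethhdr eh = { .h_proto = 0 };

	return eh.h_proto;
}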
diff --git a/target/linux/generic/pending-4.14/300-mips_expose_boot_raw.patch b/target/linux/generic/pending-4.14/300-mips_expose_boot_raw.patch
new file mode 100644
index 0000000..9419e99
--- /dev/null
+++ b/target/linux/generic/pending-4.14/300-mips_expose_boot_raw.patch
@@ -0,0 +1,40 @@
+From: Mark Miller <mark@mirell.org>
+Subject: mips: expose CONFIG_BOOT_RAW
+
+This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on
+certain Broadcom chipsets running CFE in order to load the kernel.
+
+Signed-off-by: Mark Miller <mark@mirell.org>
+Acked-by: Rob Landley <rob@landley.net>
+---
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1066,9 +1066,6 @@ config FW_ARC
+ config ARCH_MAY_HAVE_PC_FDC
+ bool
+
+-config BOOT_RAW
+- bool
+-
+ config CEVT_BCM1480
+ bool
+
+@@ -2966,6 +2963,18 @@ choice
+ bool "Extend builtin kernel arguments with bootloader arguments"
+ endchoice
+
++config BOOT_RAW
++ bool "Enable the kernel to be executed from the load address"
++ default n
++ help
++ Allow the kernel to be executed from the load address for
++ bootloaders which cannot read the ELF format. This places
++ a jump to start_kernel at the load address.
++
++ If unsure, say N.
++
++
++
+ endmenu
+
+ config LOCKDEP_SUPPORT
diff --git a/target/linux/generic/pending-4.14/302-mips_no_branch_likely.patch b/target/linux/generic/pending-4.14/302-mips_no_branch_likely.patch
new file mode 100644
index 0000000..0e46002
--- /dev/null
+++ b/target/linux/generic/pending-4.14/302-mips_no_branch_likely.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: mips: use -mno-branch-likely for kernel and userspace
+
+saves ~11k kernel size after lzma and ~12k squashfs size in the
+
+lede-commit: 41a039f46450ffae9483d6216422098669da2900
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -90,7 +90,7 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
+ # machines may also. Since BFD is incredibly buggy with respect to
+ # crossformat linking we rely on the elf2ecoff tool for format conversion.
+ #
+-cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
++cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
+ cflags-y += -msoft-float
+ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+ KBUILD_AFLAGS_MODULE += -mlong-calls
diff --git a/target/linux/generic/pending-4.14/304-mips_disable_fpu.patch b/target/linux/generic/pending-4.14/304-mips_disable_fpu.patch
new file mode 100644
index 0000000..bee62b26
--- /dev/null
+++ b/target/linux/generic/pending-4.14/304-mips_disable_fpu.patch
@@ -0,0 +1,137 @@
+From: Manuel Lauss <manuel.lauss@gmail.com>
+Subject: [RFC PATCH v4 2/2] MIPS: make FPU emulator optional
+
+This small patch makes the MIPS FPU emulator optional. With the emulator
+disabled, float-users on systems without a hardware FPU are killed with SIGILL.
+
+Disabling the emulator shrinks vmlinux by about 54kBytes (32bit,
+optimizing for size).
+
+Signed-off-by: Manuel Lauss <manuel.lauss@gmail.com>
+---
+v4: rediffed because of patch 1/2, should now work with micromips as well
+v3: updated patch description with size savings.
+v2: incorporated changes suggested by Jonas Gorski
+ force the fpu emulator on for micromips: relocating the parts
+ of the mmips code in the emulator to other areas would be a
+ much larger change; I went the cheap route instead with this.
+
+ arch/mips/Kbuild | 2 +-
+ arch/mips/Kconfig | 14 ++++++++++++++
+ arch/mips/include/asm/fpu.h | 5 +++--
+ arch/mips/include/asm/fpu_emulator.h | 15 +++++++++++++++
+ 4 files changed, 33 insertions(+), 3 deletions(-)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2890,6 +2890,20 @@ config MIPS_O32_FP64_SUPPORT
+
+ If unsure, say N.
+
++config MIPS_FPU_EMULATOR
++ bool "MIPS FPU Emulator"
++ default y
++ help
++ This option lets you disable the built-in MIPS FPU (Coprocessor 1)
++ emulator, which handles floating-point instructions on processors
++ without a hardware FPU. It is generally a good idea to keep the
++ emulator built-in, unless you are perfectly sure you have a
++ complete soft-float environment. With the emulator disabled, all
++	  users of float operations will be killed with an illegal
++	  instruction exception.
++
++ Say Y, please.
++
+ config USE_OF
+ bool
+ select OF
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -319,7 +319,7 @@ OBJCOPYFLAGS += --remove-section=.regin
+ head-y := arch/mips/kernel/head.o
+
+ libs-y += arch/mips/lib/
+-libs-y += arch/mips/math-emu/
++libs-$(CONFIG_MIPS_FPU_EMULATOR) += arch/mips/math-emu/
+
+ # See arch/mips/Kbuild for content of core part of the kernel
+ core-y += arch/mips/
+--- a/arch/mips/include/asm/fpu.h
++++ b/arch/mips/include/asm/fpu.h
+@@ -230,8 +230,10 @@ static inline int init_fpu(void)
+ /* Restore FRE */
+ write_c0_config5(config5);
+ enable_fpu_hazard();
+- } else
++ } else if (IS_ENABLED(CONFIG_MIPS_FPU_EMULATOR))
+ fpu_emulator_init_fpu();
++ else
++ ret = SIGILL;
+
+ return ret;
+ }
+--- a/arch/mips/include/asm/fpu_emulator.h
++++ b/arch/mips/include/asm/fpu_emulator.h
+@@ -30,6 +30,7 @@
+ #include <asm/local.h>
+ #include <asm/processor.h>
+
++#ifdef CONFIG_MIPS_FPU_EMULATOR
+ #ifdef CONFIG_DEBUG_FS
+
+ struct mips_fpu_emulator_stats {
+@@ -179,6 +180,16 @@ do { \
+ extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+ struct mips_fpu_struct *ctx, int has_fpu,
+ void __user **fault_addr);
++#else /* no CONFIG_MIPS_FPU_EMULATOR */
++static inline int fpu_emulator_cop1Handler(struct pt_regs *xcp,
++ struct mips_fpu_struct *ctx, int has_fpu,
++ void __user **fault_addr)
++{
++ *fault_addr = NULL;
++ return SIGILL; /* we don't speak MIPS FPU */
++}
++#endif /* CONFIG_MIPS_FPU_EMULATOR */
++
+ void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk);
+ int process_fpemu_return(int sig, void __user *fault_addr,
+--- a/arch/mips/include/asm/dsemul.h
++++ b/arch/mips/include/asm/dsemul.h
+@@ -41,6 +41,7 @@ struct task_struct;
+ extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long branch_pc, unsigned long cont_pc);
+
++#ifdef CONFIG_MIPS_FPU_EMULATOR
+ /**
+ * do_dsemulret() - Return from a delay slot 'emulation' frame
+ * @xcp: User thread register context.
+@@ -88,5 +89,27 @@ extern bool dsemul_thread_rollback(struc
+ * before @mm is freed in order to avoid memory leaks.
+ */
+ extern void dsemul_mm_cleanup(struct mm_struct *mm);
++#else
++static inline bool do_dsemulret(struct pt_regs *xcp)
++{
++ return false;
++}
++
++static inline bool dsemul_thread_cleanup(struct task_struct *tsk)
++{
++ return false;
++}
++
++static inline bool dsemul_thread_rollback(struct pt_regs *regs)
++{
++ return false;
++}
++
++static inline void dsemul_mm_cleanup(struct mm_struct *mm)
++{
++
++}
++
++#endif
+
+ #endif /* __MIPS_ASM_DSEMUL_H__ */
diff --git a/target/linux/generic/pending-4.14/305-mips_module_reloc.patch b/target/linux/generic/pending-4.14/305-mips_module_reloc.patch
new file mode 100644
index 0000000..253b50f
--- /dev/null
+++ b/target/linux/generic/pending-4.14/305-mips_module_reloc.patch
@@ -0,0 +1,366 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: mips: replace -mlong-calls with -mno-long-calls to make function calls faster in kernel modules
+
+lede-commit: 3b3d64743ba2a874df9d70cd19e242205b0a788c
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/Makefile | 5 +
+ arch/mips/include/asm/module.h | 5 +
+ arch/mips/kernel/module.c | 279 ++++++++++++++++++++++++++++++++++++++++-
+ 3 files changed, 284 insertions(+), 5 deletions(-)
+
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -93,8 +93,13 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
+ cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
+ cflags-y += -msoft-float
+ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
++ifdef CONFIG_64BIT
+ KBUILD_AFLAGS_MODULE += -mlong-calls
+ KBUILD_CFLAGS_MODULE += -mlong-calls
++else
++KBUILD_AFLAGS_MODULE += -mno-long-calls
++KBUILD_CFLAGS_MODULE += -mno-long-calls
++endif
+
+ ifeq ($(CONFIG_RELOCATABLE),y)
+ LDFLAGS_vmlinux += --emit-relocs
+--- a/arch/mips/include/asm/module.h
++++ b/arch/mips/include/asm/module.h
+@@ -12,6 +12,11 @@ struct mod_arch_specific {
+ const struct exception_table_entry *dbe_start;
+ const struct exception_table_entry *dbe_end;
+ struct mips_hi16 *r_mips_hi16_list;
++
++ void *phys_plt_tbl;
++ void *virt_plt_tbl;
++ unsigned int phys_plt_offset;
++ unsigned int virt_plt_offset;
+ };
+
+ typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
+--- a/arch/mips/kernel/module.c
++++ b/arch/mips/kernel/module.c
+@@ -44,14 +44,221 @@ struct mips_hi16 {
+ static LIST_HEAD(dbe_list);
+ static DEFINE_SPINLOCK(dbe_lock);
+
+-#ifdef MODULE_START
++/*
++ * Get the potential max trampolines size required of the init and
++ * non-init sections. Only used if we cannot find enough contiguous
++ * physically mapped memory to put the module into.
++ */
++static unsigned int
++get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
++ const char *secstrings, unsigned int symindex, bool is_init)
++{
++ unsigned long ret = 0;
++ unsigned int i, j;
++ Elf_Sym *syms;
++
++ /* Everything marked ALLOC (this includes the exported symbols) */
++ for (i = 1; i < hdr->e_shnum; ++i) {
++ unsigned int info = sechdrs[i].sh_info;
++
++ if (sechdrs[i].sh_type != SHT_REL
++ && sechdrs[i].sh_type != SHT_RELA)
++ continue;
++
++ /* Not a valid relocation section? */
++ if (info >= hdr->e_shnum)
++ continue;
++
++ /* Don't bother with non-allocated sections */
++ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
++ continue;
++
++ /* If it's called *.init*, and we're not init, we're
++ not interested */
++ if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
++ != is_init)
++ continue;
++
++ syms = (Elf_Sym *) sechdrs[symindex].sh_addr;
++ if (sechdrs[i].sh_type == SHT_REL) {
++ Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr;
++ unsigned int size = sechdrs[i].sh_size / sizeof(*rel);
++
++ for (j = 0; j < size; ++j) {
++ Elf_Sym *sym;
++
++ if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26)
++ continue;
++
++ sym = syms + ELF_MIPS_R_SYM(rel[j]);
++ if (!is_init && sym->st_shndx != SHN_UNDEF)
++ continue;
++
++ ret += 4 * sizeof(int);
++ }
++ } else {
++ Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr;
++ unsigned int size = sechdrs[i].sh_size / sizeof(*rela);
++
++ for (j = 0; j < size; ++j) {
++ Elf_Sym *sym;
++
++ if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26)
++ continue;
++
++ sym = syms + ELF_MIPS_R_SYM(rela[j]);
++ if (!is_init && sym->st_shndx != SHN_UNDEF)
++ continue;
++
++ ret += 4 * sizeof(int);
++ }
++ }
++ }
++
++ return ret;
++}
++
++#ifndef MODULE_START
++static void *alloc_phys(unsigned long size)
++{
++ unsigned order;
++ struct page *page;
++ struct page *p;
++
++ size = PAGE_ALIGN(size);
++ order = get_order(size);
++
++ page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN |
++ __GFP_THISNODE, order);
++ if (!page)
++ return NULL;
++
++ split_page(page, order);
++
++ /* mark all pages except for the last one */
++ for (p = page; p + 1 < page + (size >> PAGE_SHIFT); ++p)
++ set_bit(PG_owner_priv_1, &p->flags);
++
++ for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p)
++ __free_page(p);
++
++ return page_address(page);
++}
++#endif
++
++static void free_phys(void *ptr)
++{
++ struct page *page;
++ bool free;
++
++ page = virt_to_page(ptr);
++ do {
++ free = test_and_clear_bit(PG_owner_priv_1, &page->flags);
++ __free_page(page);
++ page++;
++ } while (free);
++}
++
++
+ void *module_alloc(unsigned long size)
+ {
++#ifdef MODULE_START
+ return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
++#else
++ void *ptr;
++
++ if (size == 0)
++ return NULL;
++
++ ptr = alloc_phys(size);
++
++ /* If we failed to allocate physically contiguous memory,
++ * fall back to regular vmalloc. The module loader code will
++ * create jump tables to handle long jumps */
++ if (!ptr)
++ return vmalloc(size);
++
++ return ptr;
++#endif
+ }
++
++static inline bool is_phys_addr(void *ptr)
++{
++#ifdef CONFIG_64BIT
++ return (KSEGX((unsigned long)ptr) == CKSEG0);
++#else
++ return (KSEGX(ptr) == KSEG0);
+ #endif
++}
++
++/* Free memory returned from module_alloc */
++void module_memfree(void *module_region)
++{
++ if (is_phys_addr(module_region))
++ free_phys(module_region);
++ else
++ vfree(module_region);
++}
++
++static void *__module_alloc(int size, bool phys)
++{
++ void *ptr;
++
++ if (phys)
++ ptr = kmalloc(size, GFP_KERNEL);
++ else
++ ptr = vmalloc(size);
++ return ptr;
++}
++
++static void __module_free(void *ptr)
++{
++ if (is_phys_addr(ptr))
++ kfree(ptr);
++ else
++ vfree(ptr);
++}
++
++int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
++ char *secstrings, struct module *mod)
++{
++ unsigned int symindex = 0;
++ unsigned int core_size, init_size;
++ int i;
++
++ mod->arch.phys_plt_offset = 0;
++ mod->arch.virt_plt_offset = 0;
++ mod->arch.phys_plt_tbl = NULL;
++ mod->arch.virt_plt_tbl = NULL;
++
++ if (IS_ENABLED(CONFIG_64BIT))
++ return 0;
++
++ for (i = 1; i < hdr->e_shnum; i++)
++ if (sechdrs[i].sh_type == SHT_SYMTAB)
++ symindex = i;
++
++ core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false);
++ init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true);
++
++ if ((core_size + init_size) == 0)
++ return 0;
++
++ mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1);
++ if (!mod->arch.phys_plt_tbl)
++ return -ENOMEM;
++
++ mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0);
++ if (!mod->arch.virt_plt_tbl) {
++ __module_free(mod->arch.phys_plt_tbl);
++ mod->arch.phys_plt_tbl = NULL;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
+
+ static int apply_r_mips_none(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+@@ -67,9 +274,40 @@ static int apply_r_mips_32(struct module
+ return 0;
+ }
+
++static Elf_Addr add_plt_entry_to(unsigned *plt_offset,
++ void *start, Elf_Addr v)
++{
++ unsigned *tramp = start + *plt_offset;
++ *plt_offset += 4 * sizeof(int);
++
++ /* adjust carry for addiu */
++ if (v & 0x00008000)
++ v += 0x10000;
++
++ tramp[0] = 0x3c190000 | (v >> 16); /* lui t9, hi16 */
++ tramp[1] = 0x27390000 | (v & 0xffff); /* addiu t9, t9, lo16 */
++ tramp[2] = 0x03200008; /* jr t9 */
++ tramp[3] = 0x00000000; /* nop */
++
++ return (Elf_Addr) tramp;
++}
++
++static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v)
++{
++ if (is_phys_addr(location))
++ return add_plt_entry_to(&me->arch.phys_plt_offset,
++ me->arch.phys_plt_tbl, v);
++ else
++ return add_plt_entry_to(&me->arch.virt_plt_offset,
++ me->arch.virt_plt_tbl, v);
++
++}
++
+ static int apply_r_mips_26(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+ {
++ u32 ofs = base & 0x03ffffff;
++
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_26 relocation\n",
+ me->name);
+@@ -77,13 +315,17 @@ static int apply_r_mips_26(struct module
+ }
+
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+- pr_err("module %s: relocation overflow\n",
+- me->name);
+- return -ENOEXEC;
++ v = add_plt_entry(me, location, v + (ofs << 2));
++ if (!v) {
++ pr_err("module %s: relocation overflow\n",
++ me->name);
++ return -ENOEXEC;
++ }
++ ofs = 0;
+ }
+
+ *location = (*location & ~0x03ffffff) |
+- ((base + (v >> 2)) & 0x03ffffff);
++ ((ofs + (v >> 2)) & 0x03ffffff);
+
+ return 0;
+ }
+@@ -459,9 +701,36 @@ int module_finalize(const Elf_Ehdr *hdr,
+ list_add(&me->arch.dbe_list, &dbe_list);
+ spin_unlock_irq(&dbe_lock);
+ }
++
++ /* Get rid of the fixup trampoline if we're running the module
++ * from physically mapped address space */
++ if (me->arch.phys_plt_offset == 0) {
++ __module_free(me->arch.phys_plt_tbl);
++ me->arch.phys_plt_tbl = NULL;
++ }
++ if (me->arch.virt_plt_offset == 0) {
++ __module_free(me->arch.virt_plt_tbl);
++ me->arch.virt_plt_tbl = NULL;
++ }
++
+ return 0;
+ }
+
++void module_arch_freeing_init(struct module *mod)
++{
++ if (mod->state == MODULE_STATE_LIVE)
++ return;
++
++ if (mod->arch.phys_plt_tbl) {
++ __module_free(mod->arch.phys_plt_tbl);
++ mod->arch.phys_plt_tbl = NULL;
++ }
++ if (mod->arch.virt_plt_tbl) {
++ __module_free(mod->arch.virt_plt_tbl);
++ mod->arch.virt_plt_tbl = NULL;
++ }
++}
++
+ void module_arch_cleanup(struct module *mod)
+ {
+ spin_lock_irq(&dbe_lock);
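
For reference, the 16-byte trampoline that add_plt_entry_to() emits can be
reproduced in isolation to see how the addiu carry adjustment works; the
encoding below mirrors the patch, but the target address is made up:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static void encode_plt(uint32_t v, uint32_t tramp[4])
{
	/* addiu sign-extends its 16-bit immediate, so bump the upper half
	 * when the low half would be interpreted as negative */
	if (v & 0x00008000)
		v += 0x10000;

	tramp[0] = 0x3c190000 | (v >> 16);	/* lui   t9, hi16 */
	tramp[1] = 0x27390000 | (v & 0xffff);	/* addiu t9, t9, lo16 */
	tramp[2] = 0x03200008;			/* jr    t9 */
	tramp[3] = 0x00000000;			/* nop (delay slot) */
}

int main(void)
{
	uint32_t t[4];

	/* hypothetical kernel symbol at 0x8024a9c0: lui loads 0x80250000 and
	 * the sign-extended addiu of 0xa9c0 subtracts 0x5640, landing exactly
	 * on the target */
	encode_plt(0x8024a9c0, t);
	printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
	       t[0], t[1], t[2], t[3]);
	return 0;
}
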
diff --git a/target/linux/generic/pending-4.14/306-mips_mem_functions_performance.patch b/target/linux/generic/pending-4.14/306-mips_mem_functions_performance.patch
new file mode 100644
index 0000000..e73cfd6
--- /dev/null
+++ b/target/linux/generic/pending-4.14/306-mips_mem_functions_performance.patch
@@ -0,0 +1,106 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: [PATCH] mips: allow the compiler to optimize memset, memcmp, memcpy for better performance and (in some instances) smaller code
+
+lede-commit: 07e59c7bc7f375f792ec9734be42fe4fa391a8bb
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/boot/compressed/Makefile | 3 ++-
+ arch/mips/include/asm/string.h | 38 ++++++++++++++++++++++++++++++++++++++
+ arch/mips/lib/Makefile | 2 +-
+ arch/mips/lib/memcmp.c | 22 ++++++++++++++++++++++
+ 4 files changed, 63 insertions(+), 2 deletions(-)
+ create mode 100644 arch/mips/lib/memcmp.c
+
+--- a/arch/mips/boot/compressed/Makefile
++++ b/arch/mips/boot/compressed/Makefile
+@@ -23,7 +23,8 @@ KBUILD_CFLAGS := $(filter-out -pg, $(KBU
+ KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS))
+
+ KBUILD_CFLAGS := $(KBUILD_CFLAGS) -D__KERNEL__ \
+- -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull"
++ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull" \
++ -D__ZBOOT__
+
+ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
+ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
+--- a/arch/mips/include/asm/string.h
++++ b/arch/mips/include/asm/string.h
+@@ -140,4 +140,42 @@ extern void *memcpy(void *__to, __const_
+ #define __HAVE_ARCH_MEMMOVE
+ extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+
++#ifndef __ZBOOT__
++#define memset(__s, __c, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memset((__s), (__c), __len); \
++ else \
++ __ret = __builtin_memset((__s), (__c), __len); \
++ __ret; \
++})
++
++#define memcpy(dst, src, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memcpy((dst), (src), __len); \
++ else \
++ __ret = __builtin_memcpy((dst), (src), __len); \
++ __ret; \
++})
++
++#define memmove(dst, src, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memmove((dst), (src), __len); \
++ else \
++ __ret = __builtin_memmove((dst), (src), __len); \
++ __ret; \
++})
++
++#define __HAVE_ARCH_MEMCMP
++#define memcmp(src1, src2, len) __builtin_memcmp((src1), (src2), (len))
++#endif
++
+ #endif /* _ASM_STRING_H */
+--- a/arch/mips/lib/Makefile
++++ b/arch/mips/lib/Makefile
+@@ -5,7 +5,7 @@
+
+ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
+ mips-atomic.o strncpy_user.o \
+- strnlen_user.o uncached.o
++ strnlen_user.o uncached.o memcmp.o
+
+ obj-y += iomap.o iomap_copy.o
+ obj-$(CONFIG_PCI) += iomap-pci.o
+--- /dev/null
++++ b/arch/mips/lib/memcmp.c
+@@ -0,0 +1,22 @@
++/*
++ * copied from linux/lib/string.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++#include <linux/module.h>
++#include <linux/string.h>
++
++#undef memcmp
++int memcmp(const void *cs, const void *ct, size_t count)
++{
++ const unsigned char *su1, *su2;
++ int res = 0;
++
++ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
++ if ((res = *su1 - *su2) != 0)
++ break;
++ return res;
++}
++EXPORT_SYMBOL(memcmp);
++
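
The effect of these wrappers can be illustrated with a small standalone program
that re-creates the same __builtin_constant_p() dispatch in userspace; the name
opt_memset and the buffer sizes are invented, and unlike the kernel macro it
does not rely on non-recursive expansion of memset itself:

#include <stdio.h>
#include <string.h>

#define opt_memset(s, c, len)						\
({									\
	size_t __len = (len);						\
	void *__ret;							\
	if (__builtin_constant_p(len) && __len >= 64)			\
		__ret = memset((s), (c), __len);	/* out-of-line */ \
	else								\
		__ret = __builtin_memset((s), (c), __len); /* inlined */ \
	__ret;								\
})

int main(void)
{
	char small[16], big[256];

	opt_memset(small, 0, sizeof(small));	/* constant 16  -> inline stores */
	opt_memset(big, 0, sizeof(big));	/* constant 256 -> library memset */
	printf("%d %d\n", small[0], big[0]);
	return 0;
}
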
diff --git a/target/linux/generic/pending-4.14/307-mips_highmem_offset.patch b/target/linux/generic/pending-4.14/307-mips_highmem_offset.patch
new file mode 100644
index 0000000..0529b0c
--- /dev/null
+++ b/target/linux/generic/pending-4.14/307-mips_highmem_offset.patch
@@ -0,0 +1,19 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: adjust mips highmem offset to avoid the need for -mlong-calls on systems with >256M RAM
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/include/asm/mach-generic/spaces.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/include/asm/mach-generic/spaces.h
++++ b/arch/mips/include/asm/mach-generic/spaces.h
+@@ -46,7 +46,7 @@
+ * Memory above this physical address will be considered highmem.
+ */
+ #ifndef HIGHMEM_START
+-#define HIGHMEM_START _AC(0x20000000, UL)
++#define HIGHMEM_START _AC(0x10000000, UL)
+ #endif
+
+ #endif /* CONFIG_32BIT */
diff --git a/target/linux/generic/pending-4.14/308-mips32r2_tune.patch b/target/linux/generic/pending-4.14/308-mips32r2_tune.patch
new file mode 100644
index 0000000..7f98616
--- /dev/null
+++ b/target/linux/generic/pending-4.14/308-mips32r2_tune.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add -mtune=34kc to MIPS CFLAGS when building for mips32r2
+
+This provides a good tradeoff across at least 24Kc-74Kc, while also
+producing smaller code.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -162,7 +162,7 @@ cflags-$(CONFIG_CPU_R4X00) += -march=r46
+ cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
+ cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
+ -Wa,-mips32 -Wa,--trap
+-cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
++cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2 -mtune=34kc,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
+ -Wa,-mips32r2 -Wa,--trap
+ cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
+ cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
diff --git a/target/linux/generic/pending-4.14/310-arm_module_unresolved_weak_sym.patch b/target/linux/generic/pending-4.14/310-arm_module_unresolved_weak_sym.patch
new file mode 100644
index 0000000..bc9f0a4
--- /dev/null
+++ b/target/linux/generic/pending-4.14/310-arm_module_unresolved_weak_sym.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: arm: fix relocation errors for unresolved weak symbols
+
+lede-commit: 570699d4838a907c3ef9f2819bf19eb72997b32f
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/arm/kernel/module.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -95,6 +95,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons
+ return -ENOEXEC;
+ }
+
++ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) &&
++ ELF_ST_BIND(sym->st_info) == STB_WEAK)
++ continue;
++
+ loc = dstsec->sh_addr + rel->r_offset;
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
diff --git a/target/linux/generic/pending-4.14/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch b/target/linux/generic/pending-4.14/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch
new file mode 100644
index 0000000..5cdcc2b
--- /dev/null
+++ b/target/linux/generic/pending-4.14/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch
@@ -0,0 +1,272 @@
+From: Yousong Zhou <yszhou4tech@gmail.com>
+Subject: MIPS: kexec: Accept command line parameters from userspace.
+
+Signed-off-by: Yousong Zhou <yszhou4tech@gmail.com>
+---
+ arch/mips/kernel/machine_kexec.c | 153 +++++++++++++++++++++++++++++++-----
+ arch/mips/kernel/machine_kexec.h | 20 +++++
+ arch/mips/kernel/relocate_kernel.S | 21 +++--
+ 3 files changed, 167 insertions(+), 27 deletions(-)
+ create mode 100644 arch/mips/kernel/machine_kexec.h
+
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -10,14 +10,11 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+
++#include <asm/bootinfo.h>
+ #include <asm/cacheflush.h>
+ #include <asm/page.h>
+-
+-extern const unsigned char relocate_new_kernel[];
+-extern const size_t relocate_new_kernel_size;
+-
+-extern unsigned long kexec_start_address;
+-extern unsigned long kexec_indirection_page;
++#include <asm/uaccess.h>
++#include "machine_kexec.h"
+
+ int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+ void (*_machine_kexec_shutdown)(void) = NULL;
+@@ -28,6 +25,99 @@ atomic_t kexec_ready_to_reboot = ATOMIC_
+ void (*_crash_smp_send_stop)(void) = NULL;
+ #endif
+
++static void machine_kexec_print_args(void)
++{
++ unsigned long argc = (int)kexec_args[0];
++ int i;
++
++ pr_info("kexec_args[0] (argc): %lu\n", argc);
++ pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
++ pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
++ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
++
++ for (i = 0; i < argc; i++) {
++ pr_info("kexec_argv[%d] = %p, %s\n",
++ i, kexec_argv[i], kexec_argv[i]);
++ }
++}
++
++static void machine_kexec_init_argv(struct kimage *image)
++{
++ void __user *buf = NULL;
++ size_t bufsz;
++ size_t size;
++ int i;
++
++ bufsz = 0;
++ for (i = 0; i < image->nr_segments; i++) {
++ struct kexec_segment *seg;
++
++ seg = &image->segment[i];
++ if (seg->bufsz < 6)
++ continue;
++
++ if (strncmp((char *) seg->buf, "kexec ", 6))
++ continue;
++
++ buf = seg->buf;
++ bufsz = seg->bufsz;
++ break;
++ }
++
++ if (!buf)
++ return;
++
++ size = KEXEC_COMMAND_LINE_SIZE;
++ size = min(size, bufsz);
++ if (size < bufsz)
++ pr_warn("kexec command line truncated to %zd bytes\n", size);
++
++ /* Copy to kernel space */
++ copy_from_user(kexec_argv_buf, buf, size);
++ kexec_argv_buf[size - 1] = 0;
++}
++
++static void machine_kexec_parse_argv(struct kimage *image)
++{
++ char *reboot_code_buffer;
++ int reloc_delta;
++ char *ptr;
++ int argc;
++ int i;
++
++ ptr = kexec_argv_buf;
++ argc = 0;
++
++ /*
++ * convert command line string to array of parameters
++ * (as bootloader does).
++ */
++ while (ptr && *ptr && (KEXEC_MAX_ARGC > argc)) {
++ if (*ptr == ' ') {
++ *ptr++ = '\0';
++ continue;
++ }
++
++ kexec_argv[argc++] = ptr;
++ ptr = strchr(ptr, ' ');
++ }
++
++ if (!argc)
++ return;
++
++ kexec_args[0] = argc;
++ kexec_args[1] = (unsigned long)kexec_argv;
++ kexec_args[2] = 0;
++ kexec_args[3] = 0;
++
++ reboot_code_buffer = page_address(image->control_code_page);
++ reloc_delta = reboot_code_buffer - (char *)kexec_relocate_new_kernel;
++
++ kexec_args[1] += reloc_delta;
++ for (i = 0; i < argc; i++)
++ kexec_argv[i] += reloc_delta;
++}
++
+ static void kexec_image_info(const struct kimage *kimage)
+ {
+ unsigned long i;
+@@ -52,6 +142,18 @@ int
+ machine_kexec_prepare(struct kimage *kimage)
+ {
+ kexec_image_info(kimage);
++ /*
++	 * Initialize the arguments with the original firmware values first,
++	 * so that booting still works if kexec-tools passes no command line.
++ */
++
++ kexec_args[0] = fw_arg0;
++ kexec_args[1] = fw_arg1;
++ kexec_args[2] = fw_arg2;
++ kexec_args[3] = fw_arg3;
++
++ machine_kexec_init_argv(kimage);
++ machine_kexec_parse_argv(kimage);
+
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
+@@ -89,10 +191,12 @@ machine_kexec(struct kimage *image)
+ unsigned long *ptr;
+
+ reboot_code_buffer =
+- (unsigned long)page_address(image->control_code_page);
++ (unsigned long)page_address(image->control_code_page);
++ pr_info("reboot_code_buffer = %p\n", (void *)reboot_code_buffer);
+
+ kexec_start_address =
+ (unsigned long) phys_to_virt(image->start);
++ pr_info("kexec_start_address = %p\n", (void *)kexec_start_address);
+
+ if (image->type == KEXEC_TYPE_DEFAULT) {
+ kexec_indirection_page =
+@@ -100,9 +204,19 @@ machine_kexec(struct kimage *image)
+ } else {
+ kexec_indirection_page = (unsigned long)&image->head;
+ }
++ pr_info("kexec_indirection_page = %p\n", (void *)kexec_indirection_page);
+
+- memcpy((void*)reboot_code_buffer, relocate_new_kernel,
+- relocate_new_kernel_size);
++ pr_info("Where is memcpy: %p\n", memcpy);
++ pr_info("kexec_relocate_new_kernel = %p, kexec_relocate_new_kernel_end = %p\n",
++ (void *)kexec_relocate_new_kernel, &kexec_relocate_new_kernel_end);
++ pr_info("Copy %lu bytes from %p to %p\n", KEXEC_RELOCATE_NEW_KERNEL_SIZE,
++ (void *)kexec_relocate_new_kernel, (void *)reboot_code_buffer);
++ memcpy((void*)reboot_code_buffer, kexec_relocate_new_kernel,
++ KEXEC_RELOCATE_NEW_KERNEL_SIZE);
++
++ pr_info("Before _print_args().\n");
++ machine_kexec_print_args();
++ pr_info("Before eval loop.\n");
+
+ /*
+ * The generic kexec code builds a page list with physical
+@@ -121,15 +235,16 @@ machine_kexec(struct kimage *image)
+ /*
+ * we do not want to be bothered.
+ */
++ pr_info("Before irq_disable.\n");
+ local_irq_disable();
+
+- printk("Will call new kernel at %08lx\n", image->start);
+- printk("Bye ...\n");
++ pr_info("Will call new kernel at %08lx\n", image->start);
++ pr_info("Bye ...\n");
+ __flush_cache_all();
+ #ifdef CONFIG_SMP
+ /* All secondary cpus now may jump to kexec_wait cycle */
+ relocated_kexec_smp_wait = reboot_code_buffer +
+- (void *)(kexec_smp_wait - relocate_new_kernel);
++ (void *)(kexec_smp_wait - kexec_relocate_new_kernel);
+ smp_wmb();
+ atomic_set(&kexec_ready_to_reboot, 1);
+ #endif
+--- /dev/null
++++ b/arch/mips/kernel/machine_kexec.h
+@@ -0,0 +1,20 @@
++#ifndef _MACHINE_KEXEC_H
++#define _MACHINE_KEXEC_H
++
++#ifndef __ASSEMBLY__
++extern const unsigned char kexec_relocate_new_kernel[];
++extern unsigned long kexec_relocate_new_kernel_end;
++extern unsigned long kexec_start_address;
++extern unsigned long kexec_indirection_page;
++
++extern char kexec_argv_buf[];
++extern char *kexec_argv[];
++
++#define KEXEC_RELOCATE_NEW_KERNEL_SIZE ((unsigned long)&kexec_relocate_new_kernel_end - (unsigned long)kexec_relocate_new_kernel)
++#endif /* !__ASSEMBLY__ */
++
++#define KEXEC_COMMAND_LINE_SIZE 256
++#define KEXEC_ARGV_SIZE (KEXEC_COMMAND_LINE_SIZE / 16)
++#define KEXEC_MAX_ARGC (KEXEC_ARGV_SIZE / sizeof(long))
++
++#endif
+--- a/arch/mips/kernel/relocate_kernel.S
++++ b/arch/mips/kernel/relocate_kernel.S
+@@ -12,8 +12,9 @@
+ #include <asm/mipsregs.h>
+ #include <asm/stackframe.h>
+ #include <asm/addrspace.h>
++#include "machine_kexec.h"
+
+-LEAF(relocate_new_kernel)
++LEAF(kexec_relocate_new_kernel)
+ PTR_L a0, arg0
+ PTR_L a1, arg1
+ PTR_L a2, arg2
+@@ -98,7 +99,7 @@ done:
+ #endif
+ /* jump to kexec_start_address */
+ j s1
+- END(relocate_new_kernel)
++ END(kexec_relocate_new_kernel)
+
+ #ifdef CONFIG_SMP
+ /*
+@@ -184,9 +185,15 @@ kexec_indirection_page:
+ PTR 0
+ .size kexec_indirection_page, PTRSIZE
+
+-relocate_new_kernel_end:
++kexec_argv_buf:
++ EXPORT(kexec_argv_buf)
++ .skip KEXEC_COMMAND_LINE_SIZE
++ .size kexec_argv_buf, KEXEC_COMMAND_LINE_SIZE
++
++kexec_argv:
++ EXPORT(kexec_argv)
++ .skip KEXEC_ARGV_SIZE
++ .size kexec_argv, KEXEC_ARGV_SIZE
+
+-relocate_new_kernel_size:
+- EXPORT(relocate_new_kernel_size)
+- PTR relocate_new_kernel_end - relocate_new_kernel
+- .size relocate_new_kernel_size, PTRSIZE
++kexec_relocate_new_kernel_end:
++ EXPORT(kexec_relocate_new_kernel_end)
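
The argv splitting done by machine_kexec_parse_argv() above is easy to check in
isolation; the sketch below uses a made-up command line and leaves out the
relocation of the pointers into the copied control page:

#include <stdio.h>
#include <string.h>

#define MAX_ARGC 16

int main(void)
{
	char buf[] = "kexec console=ttyS0,115200 root=/dev/mtdblock2";
	char *argv[MAX_ARGC];
	char *ptr = buf;
	int argc = 0, i;

	/* same loop shape as the kernel code: split in place on spaces */
	while (ptr && *ptr && argc < MAX_ARGC) {
		if (*ptr == ' ') {
			*ptr++ = '\0';
			continue;
		}
		argv[argc++] = ptr;
		ptr = strchr(ptr, ' ');
	}

	for (i = 0; i < argc; i++)
		printf("argv[%d] = %s\n", i, argv[i]);
	/* prints argv[0] = kexec, argv[1] = console=..., argv[2] = root=... */
	return 0;
}
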
diff --git a/target/linux/generic/pending-4.14/332-arc-add-OWRTDTB-section.patch b/target/linux/generic/pending-4.14/332-arc-add-OWRTDTB-section.patch
new file mode 100644
index 0000000..58f14b6
--- /dev/null
+++ b/target/linux/generic/pending-4.14/332-arc-add-OWRTDTB-section.patch
@@ -0,0 +1,80 @@
+From: Alexey Brodkin <abrodkin@synopsys.com>
+Subject: openwrt: arc - add OWRTDTB section
+
+This change allows OpenWRT to patch the resulting kernel binary with an
+external .dtb.
+
+That allows us to re-use exactly the same vmlinux on different boards,
+provided their ARC core configurations match (at least cache line sizes etc.).
+
+"patch-dtb" searches for the ASCII "OWRTDTB:" string and copies the external
+.dtb right after it, keeping the string in place.
+
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+---
+ arch/arc/kernel/head.S | 10 ++++++++++
+ arch/arc/kernel/setup.c | 4 +++-
+ arch/arc/kernel/vmlinux.lds.S | 13 +++++++++++++
+ 3 files changed, 26 insertions(+), 1 deletion(-)
+
+--- a/arch/arc/kernel/head.S
++++ b/arch/arc/kernel/head.S
+@@ -49,6 +49,16 @@
+ 1:
+ .endm
+
++; Here "patch-dtb" will embed external .dtb
++; Note "patch-dtb" searches for ASCII "OWRTDTB:" string
++; and pastes .dtb right after it, hence the string precedes
++; __image_dtb symbol.
++ .section .owrt, "aw",@progbits
++ .ascii "OWRTDTB:"
++ENTRY(__image_dtb)
++ .fill 0x4000
++END(__image_dtb)
++
+ .section .init.text, "ax",@progbits
+
+ ;----------------------------------------------------------------
+--- a/arch/arc/kernel/setup.c
++++ b/arch/arc/kernel/setup.c
+@@ -421,6 +421,8 @@ static inline int is_kernel(unsigned lon
+ return 0;
+ }
+
++extern struct boot_param_header __image_dtb;
++
+ void __init setup_arch(char **cmdline_p)
+ {
+ #ifdef CONFIG_ARC_UBOOT_SUPPORT
+@@ -434,7 +436,7 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+ {
+ /* No, so try the embedded one */
+- machine_desc = setup_machine_fdt(__dtb_start);
++ machine_desc = setup_machine_fdt(&__image_dtb);
+ if (!machine_desc)
+ panic("Embedded DT invalid\n");
+
+--- a/arch/arc/kernel/vmlinux.lds.S
++++ b/arch/arc/kernel/vmlinux.lds.S
+@@ -30,6 +30,19 @@ SECTIONS
+
+ . = CONFIG_LINUX_LINK_BASE;
+
++ /*
++	 * In OpenWRT we want to patch the built binary to embed a .dtb of choice.
++	 * This is implemented with the "patch-dtb" utility, which searches for
++	 * the "OWRTDTB:" string in the first 16k of the image and, if found,
++	 * copies the .dtb right after that string.
++ *
++ * Note: "OWRTDTB:" won't be overwritten with .dtb, .dtb will follow it.
++ */
++ .owrt : {
++ *(.owrt)
++ . = ALIGN(PAGE_SIZE);
++ }
++
+ _int_vec_base_lds = .;
+ .vector : {
+ *(.vector)
diff --git a/target/linux/generic/pending-4.14/333-arc-enable-unaligned-access-in-kernel-mode.patch b/target/linux/generic/pending-4.14/333-arc-enable-unaligned-access-in-kernel-mode.patch
new file mode 100644
index 0000000..4e0265a
--- /dev/null
+++ b/target/linux/generic/pending-4.14/333-arc-enable-unaligned-access-in-kernel-mode.patch
@@ -0,0 +1,24 @@
+From: Alexey Brodkin <abrodkin@synopsys.com>
+Subject: arc: enable unaligned access in kernel mode
+
+This enables misaligned access handling even in kernel mode.
+Some wireless drivers (ath9k-htc and mt7601u) use misaligned accesses
+here and there; rather than fixing each driver, we simply handle such
+accesses gracefully on ARC.
+
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+---
+ arch/arc/kernel/unaligned.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arc/kernel/unaligned.c
++++ b/arch/arc/kernel/unaligned.c
+@@ -206,7 +206,7 @@ int misaligned_fixup(unsigned long addre
+ char buf[TASK_COMM_LEN];
+
+ /* handle user mode only and only if enabled by sysadmin */
+- if (!user_mode(regs) || !unaligned_enabled)
++ if (!unaligned_enabled)
+ return 1;
+
+ if (no_unaligned_warning) {
diff --git a/target/linux/generic/pending-4.14/340-MIPS-mm-remove-mips_dma_mapping_error.patch b/target/linux/generic/pending-4.14/340-MIPS-mm-remove-mips_dma_mapping_error.patch
new file mode 100644
index 0000000..8d6cada
--- /dev/null
+++ b/target/linux/generic/pending-4.14/340-MIPS-mm-remove-mips_dma_mapping_error.patch
@@ -0,0 +1,32 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Tue, 5 Dec 2017 12:34:31 +0100
+Subject: [PATCH] MIPS: mm: remove mips_dma_mapping_error
+
+dma_mapping_error() already checks if ops->mapping_error is a null
+pointer
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/arch/mips/mm/dma-default.c
++++ b/arch/mips/mm/dma-default.c
+@@ -373,11 +373,6 @@ static void mips_dma_sync_sg_for_device(
+ }
+ }
+
+-static int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+ static int mips_dma_supported(struct device *dev, u64 mask)
+ {
+ return plat_dma_supported(dev, mask);
+@@ -406,7 +401,6 @@ static const struct dma_map_ops mips_def
+ .sync_single_for_device = mips_dma_sync_single_for_device,
+ .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = mips_dma_sync_sg_for_device,
+- .mapping_error = mips_dma_mapping_error,
+ .dma_supported = mips_dma_supported
+ };
+
diff --git a/target/linux/generic/pending-4.14/341-MIPS-mm-remove-no-op-dma_map_ops-where-possible.patch b/target/linux/generic/pending-4.14/341-MIPS-mm-remove-no-op-dma_map_ops-where-possible.patch
new file mode 100644
index 0000000..3cd8786
--- /dev/null
+++ b/target/linux/generic/pending-4.14/341-MIPS-mm-remove-no-op-dma_map_ops-where-possible.patch
@@ -0,0 +1,140 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Tue, 5 Dec 2017 12:46:01 +0100
+Subject: [PATCH] MIPS: mm: remove no-op dma_map_ops where possible
+
+If no post-DMA flush is required, and the platform does not provide
+plat_unmap_dma_mem(), there is no need to include unmap or sync_for_cpu
+ops.
+
+With this patch they are compiled out to improve icache footprint
+on devices that handle lots of DMA traffic (especially network routers).
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -220,6 +220,7 @@ config BMIPS_GENERIC
+ select BRCMSTB_L2_IRQ
+ select IRQ_MIPS_CPU
+ select DMA_NONCOHERENT
++ select DMA_UNMAP_POST_FLUSH
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_BIG_ENDIAN
+@@ -345,6 +346,7 @@ config MACH_JAZZ
+ select CSRC_R4K
+ select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN
+ select GENERIC_ISA_DMA
++ select DMA_UNMAP_POST_FLUSH
+ select HAVE_PCSPKR_PLATFORM
+ select IRQ_MIPS_CPU
+ select I8253
+@@ -1127,6 +1129,9 @@ config DMA_NONCOHERENT
+ bool
+ select NEED_DMA_MAP_STATE
+
++config DMA_UNMAP_POST_FLUSH
++ bool
++
+ config NEED_DMA_MAP_STATE
+ bool
+
+@@ -1651,6 +1656,7 @@ config CPU_R10000
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
++ select DMA_UNMAP_POST_FLUSH
+ help
+ MIPS Technologies R10000-series processors.
+
+@@ -1899,9 +1905,11 @@ config SYS_HAS_CPU_MIPS32_R3_5
+ bool
+
+ config SYS_HAS_CPU_MIPS32_R5
++ select DMA_UNMAP_POST_FLUSH
+ bool
+
+ config SYS_HAS_CPU_MIPS32_R6
++ select DMA_UNMAP_POST_FLUSH
+ bool
+
+ config SYS_HAS_CPU_MIPS64_R1
+@@ -1911,6 +1919,7 @@ config SYS_HAS_CPU_MIPS64_R2
+ bool
+
+ config SYS_HAS_CPU_MIPS64_R6
++ select DMA_UNMAP_POST_FLUSH
+ bool
+
+ config SYS_HAS_CPU_R3000
+--- a/arch/mips/mm/dma-default.c
++++ b/arch/mips/mm/dma-default.c
+@@ -267,8 +267,9 @@ static inline void __dma_sync(struct pag
+ } while (left);
+ }
+
+-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+- size_t size, enum dma_data_direction direction, unsigned long attrs)
++static void __maybe_unused
++mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
++ enum dma_data_direction direction, unsigned long attrs)
+ {
+ if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ __dma_sync(dma_addr_to_page(dev, dma_addr),
+@@ -308,9 +309,10 @@ static dma_addr_t mips_dma_map_page(stru
+ return plat_map_dma_mem_page(dev, page) + offset;
+ }
+
+-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+- int nhwentries, enum dma_data_direction direction,
+- unsigned long attrs)
++static void __maybe_unused
++mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
++ int nhwentries, enum dma_data_direction direction,
++ unsigned long attrs)
+ {
+ int i;
+ struct scatterlist *sg;
+@@ -325,8 +327,9 @@ static void mips_dma_unmap_sg(struct dev
+ }
+ }
+
+-static void mips_dma_sync_single_for_cpu(struct device *dev,
+- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
++static void __maybe_unused
++mips_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
++ size_t size, enum dma_data_direction direction)
+ {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, dma_handle),
+@@ -342,9 +345,9 @@ static void mips_dma_sync_single_for_dev
+ dma_handle & ~PAGE_MASK, size, direction);
+ }
+
+-static void mips_dma_sync_sg_for_cpu(struct device *dev,
+- struct scatterlist *sglist, int nelems,
+- enum dma_data_direction direction)
++static void __maybe_unused
++mips_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
++ int nelems, enum dma_data_direction direction)
+ {
+ int i;
+ struct scatterlist *sg;
+@@ -394,12 +397,14 @@ static const struct dma_map_ops mips_def
+ .free = mips_dma_free_coherent,
+ .mmap = mips_dma_mmap,
+ .map_page = mips_dma_map_page,
+- .unmap_page = mips_dma_unmap_page,
+ .map_sg = mips_dma_map_sg,
++#ifdef CONFIG_DMA_UNMAP_POST_FLUSH
++ .unmap_page = mips_dma_unmap_page,
+ .unmap_sg = mips_dma_unmap_sg,
+ .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
+- .sync_single_for_device = mips_dma_sync_single_for_device,
+ .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
++#endif
++ .sync_single_for_device = mips_dma_sync_single_for_device,
+ .sync_sg_for_device = mips_dma_sync_sg_for_device,
+ .dma_supported = mips_dma_supported
+ };
diff --git a/target/linux/generic/pending-4.14/400-mtd-add-rootfs-split-support.patch b/target/linux/generic/pending-4.14/400-mtd-add-rootfs-split-support.patch
new file mode 100644
index 0000000..f0c852a
--- /dev/null
+++ b/target/linux/generic/pending-4.14/400-mtd-add-rootfs-split-support.patch
@@ -0,0 +1,123 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: make rootfs split/detection more generic - patch can be moved to generic-2.6 after testing on other platforms
+
+lede-commit: 328e660b31f0937d52c5ae3d6e7029409918a9df
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/Kconfig | 17 +++++++++++++++++
+ drivers/mtd/mtdpart.c | 35 +++++++++++++++++++++++++++++++++++
+ include/linux/mtd/partitions.h | 2 ++
+ 3 files changed, 54 insertions(+)
+
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -12,6 +12,23 @@ menuconfig MTD
+
+ if MTD
+
++menu "OpenWrt specific MTD options"
++
++config MTD_ROOTFS_ROOT_DEV
++ bool "Automatically set 'rootfs' partition to be root filesystem"
++ default y
++
++config MTD_SPLIT_FIRMWARE
++ bool "Automatically split firmware partition for kernel+rootfs"
++ default y
++
++config MTD_SPLIT_FIRMWARE_NAME
++ string "Firmware partition name"
++ depends on MTD_SPLIT_FIRMWARE
++ default "firmware"
++
++endmenu
++
+ config MTD_TESTS
+ tristate "MTD tests support (DANGEROUS)"
+ depends on m
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -29,10 +29,12 @@
+ #include <linux/kmod.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
++#include <linux/magic.h>
+ #include <linux/of.h>
+ #include <linux/err.h>
+
+ #include "mtdcore.h"
++#include "mtdsplit/mtdsplit.h"
+
+ /* Our partition linked list */
+ static LIST_HEAD(mtd_partitions);
+@@ -52,6 +54,8 @@ struct mtd_part {
+ struct list_head list;
+ };
+
++static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part);
++
+ /*
+ * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
+ * the pointer to that structure.
+@@ -686,6 +690,7 @@ int mtd_add_partition(struct mtd_info *p
+ mutex_unlock(&mtd_partitions_mutex);
+
+ add_mtd_device(&new->mtd);
++ mtd_partition_split(parent, new);
+
+ mtd_add_partition_attrs(new);
+
+@@ -764,6 +769,35 @@ int mtd_del_partition(struct mtd_info *m
+ }
+ EXPORT_SYMBOL_GPL(mtd_del_partition);
+
++#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
++#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
++#else
++#define SPLIT_FIRMWARE_NAME "unused"
++#endif
++
++static void split_firmware(struct mtd_info *master, struct mtd_part *part)
++{
++}
++
++void __weak arch_split_mtd_part(struct mtd_info *master, const char *name,
++ int offset, int size)
++{
++}
++
++static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part)
++{
++ static int rootfs_found = 0;
++
++ if (rootfs_found)
++ return;
++
++ if (!strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) &&
++ IS_ENABLED(CONFIG_MTD_SPLIT_FIRMWARE))
++ split_firmware(master, part);
++
++ arch_split_mtd_part(master, part->mtd.name, part->offset,
++ part->mtd.size);
++}
+ /*
+ * This function, given a master MTD object and a partition table, creates
+ * and registers slave MTD objects which are bound to the master according to
+@@ -795,6 +829,7 @@ int add_mtd_partitions(struct mtd_info *
+ mutex_unlock(&mtd_partitions_mutex);
+
+ add_mtd_device(&slave->mtd);
++ mtd_partition_split(master, slave);
+ mtd_add_partition_attrs(slave);
+ if (parts[i].types)
+ mtd_parse_part(slave, parts[i].types);
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -109,5 +109,7 @@ int mtd_add_partition(struct mtd_info *m
+ long long offset, long long length);
+ int mtd_del_partition(struct mtd_info *master, int partno);
+ uint64_t mtd_get_device_size(const struct mtd_info *mtd);
++extern void __weak arch_split_mtd_part(struct mtd_info *master,
++ const char *name, int offset, int size);
+
+ #endif
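
Targets can hook into this split logic by overriding the __weak
arch_split_mtd_part() stub added above; the rule below is purely hypothetical
(the partition name, offset and size are invented for illustration):

#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* A strong definition in a platform file overrides the __weak stub */
void arch_split_mtd_part(struct mtd_info *master, const char *name,
			 int offset, int size)
{
	if (strcmp(name, "firmware"))
		return;

	/* e.g. expose a config blob assumed to sit in the last 64 KiB of
	 * the firmware area on this imaginary board */
	mtd_add_partition(master, "board-config",
			  offset + size - 0x10000, 0x10000);
}
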
diff --git a/target/linux/generic/pending-4.14/401-mtd-add-support-for-different-partition-parser-types.patch b/target/linux/generic/pending-4.14/401-mtd-add-support-for-different-partition-parser-types.patch
new file mode 100644
index 0000000..77ee85a
--- /dev/null
+++ b/target/linux/generic/pending-4.14/401-mtd-add-support-for-different-partition-parser-types.patch
@@ -0,0 +1,110 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: mtd: add support for different partition parser types
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/mtdpart.c | 56 ++++++++++++++++++++++++++++++++++++++++
+ include/linux/mtd/partitions.h | 11 ++++++++
+ 2 files changed, 67 insertions(+)
+
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -1034,6 +1034,62 @@ void mtd_part_parser_cleanup(struct mtd_
+ }
+ }
+
++static struct mtd_part_parser *
++get_partition_parser_by_type(enum mtd_parser_type type,
++ struct mtd_part_parser *start)
++{
++ struct mtd_part_parser *p, *ret = NULL;
++
++ spin_lock(&part_parser_lock);
++
++ p = list_prepare_entry(start, &part_parsers, list);
++ if (start)
++ mtd_part_parser_put(start);
++
++ list_for_each_entry_continue(p, &part_parsers, list) {
++ if (p->type == type && try_module_get(p->owner)) {
++ ret = p;
++ break;
++ }
++ }
++
++ spin_unlock(&part_parser_lock);
++
++ return ret;
++}
++
++int parse_mtd_partitions_by_type(struct mtd_info *master,
++ enum mtd_parser_type type,
++ const struct mtd_partition **pparts,
++ struct mtd_part_parser_data *data)
++{
++ struct mtd_part_parser *prev = NULL;
++ int ret = 0;
++
++ while (1) {
++ struct mtd_part_parser *parser;
++
++ parser = get_partition_parser_by_type(type, prev);
++ if (!parser)
++ break;
++
++ ret = (*parser->parse_fn)(master, pparts, data);
++
++ if (ret > 0) {
++ mtd_part_parser_put(parser);
++ printk(KERN_NOTICE
++ "%d %s partitions found on MTD device %s\n",
++ ret, parser->name, master->name);
++ break;
++ }
++
++ prev = parser;
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(parse_mtd_partitions_by_type);
++
+ int mtd_is_partition(const struct mtd_info *mtd)
+ {
+ struct mtd_part *part;
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -68,11 +68,14 @@ struct mtd_part_parser_data {
+ unsigned long origin;
+ };
+
+-
+ /*
+ * Functions dealing with the various ways of partitioning the space
+ */
+
++enum mtd_parser_type {
++ MTD_PARSER_TYPE_DEVICE = 0,
++};
++
+ struct mtd_part_parser {
+ struct list_head list;
+ struct module *owner;
+@@ -80,6 +83,7 @@ struct mtd_part_parser {
+ int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
+ struct mtd_part_parser_data *);
+ void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
++ enum mtd_parser_type type;
+ };
+
+ /* Container for passing around a set of parsed partitions */
+@@ -112,4 +116,9 @@ uint64_t mtd_get_device_size(const struc
+ extern void __weak arch_split_mtd_part(struct mtd_info *master,
+ const char *name, int offset, int size);
+
++int parse_mtd_partitions_by_type(struct mtd_info *master,
++ enum mtd_parser_type type,
++ const struct mtd_partition **pparts,
++ struct mtd_part_parser_data *data);
++
+ #endif
diff --git a/target/linux/generic/pending-4.14/402-mtd-use-typed-mtd-parsers-for-rootfs-and-firmware-split.patch b/target/linux/generic/pending-4.14/402-mtd-use-typed-mtd-parsers-for-rootfs-and-firmware-split.patch
new file mode 100644
index 0000000..dff9c31
--- /dev/null
+++ b/target/linux/generic/pending-4.14/402-mtd-use-typed-mtd-parsers-for-rootfs-and-firmware-split.patch
@@ -0,0 +1,81 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: kernel/3.10: allow the use of partition parsers for rootfs and firmware split
+
+lede-commit: 3b71cd94bc9517bc25267dccb393b07d4b54564e
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/mtdpart.c | 37 +++++++++++++++++++++++++++++++++++++
+ include/linux/mtd/partitions.h | 2 ++
+ 2 files changed, 39 insertions(+)
+
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -769,6 +769,36 @@ int mtd_del_partition(struct mtd_info *m
+ }
+ EXPORT_SYMBOL_GPL(mtd_del_partition);
+
++static int
++run_parsers_by_type(struct mtd_part *slave, enum mtd_parser_type type)
++{
++ struct mtd_partition *parts;
++ int nr_parts;
++ int i;
++
++ nr_parts = parse_mtd_partitions_by_type(&slave->mtd, type, (const struct mtd_partition **)&parts,
++ NULL);
++ if (nr_parts <= 0)
++ return nr_parts;
++
++ if (WARN_ON(!parts))
++ return 0;
++
++ for (i = 0; i < nr_parts; i++) {
++ /* adjust partition offsets */
++ parts[i].offset += slave->offset;
++
++ mtd_add_partition(slave->parent,
++ parts[i].name,
++ parts[i].offset,
++ parts[i].size);
++ }
++
++ kfree(parts);
++
++ return nr_parts;
++}
++
+ #ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
+ #define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
+ #else
+@@ -777,6 +807,7 @@ EXPORT_SYMBOL_GPL(mtd_del_partition);
+
+ static void split_firmware(struct mtd_info *master, struct mtd_part *part)
+ {
++ run_parsers_by_type(part, MTD_PARSER_TYPE_FIRMWARE);
+ }
+
+ void __weak arch_split_mtd_part(struct mtd_info *master, const char *name,
+@@ -791,6 +822,12 @@ static void mtd_partition_split(struct m
+ if (rootfs_found)
+ return;
+
++ if (!strcmp(part->mtd.name, "rootfs")) {
++ run_parsers_by_type(part, MTD_PARSER_TYPE_ROOTFS);
++
++ rootfs_found = 1;
++ }
++
+ if (!strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) &&
+ IS_ENABLED(CONFIG_MTD_SPLIT_FIRMWARE))
+ split_firmware(master, part);
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -74,6 +74,8 @@ struct mtd_part_parser_data {
+
+ enum mtd_parser_type {
+ MTD_PARSER_TYPE_DEVICE = 0,
++ MTD_PARSER_TYPE_ROOTFS,
++ MTD_PARSER_TYPE_FIRMWARE,
+ };
+
+ struct mtd_part_parser {
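
With the two patches above in place, a split driver registers itself with a
parser type and mtd_partition_split() finds it through run_parsers_by_type().
"mysplit" and its empty parse_fn are placeholders; only the structure fields
and the type enum come from the patched partitions.h:

#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static int mysplit_parse(struct mtd_info *mtd,
			 const struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	/* inspect the "firmware" partition, allocate a partition array,
	 * return the number of entries (0 here, i.e. nothing found) */
	return 0;
}

static struct mtd_part_parser mysplit_parser = {
	.owner		= THIS_MODULE,
	.name		= "mysplit",
	.parse_fn	= mysplit_parse,
	.type		= MTD_PARSER_TYPE_FIRMWARE,
};
module_mtd_part_parser(mysplit_parser);

MODULE_LICENSE("GPL");
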
diff --git a/target/linux/generic/pending-4.14/403-mtd-hook-mtdsplit-to-Kbuild.patch b/target/linux/generic/pending-4.14/403-mtd-hook-mtdsplit-to-Kbuild.patch
new file mode 100644
index 0000000..89c202b
--- /dev/null
+++ b/target/linux/generic/pending-4.14/403-mtd-hook-mtdsplit-to-Kbuild.patch
@@ -0,0 +1,32 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: [PATCH] kernel/3.10: move squashfs check from rootfs split code into a separate file
+
+lede-commit: d89bea92b31b4e157a0fa438e75370f089f73427
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/Kconfig | 2 ++
+ drivers/mtd/Makefile | 2 ++
+ 2 files changed, 4 insertions(+)
+
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -27,6 +27,8 @@ config MTD_SPLIT_FIRMWARE_NAME
+ depends on MTD_SPLIT_FIRMWARE
+ default "firmware"
+
++source "drivers/mtd/mtdsplit/Kconfig"
++
+ endmenu
+
+ config MTD_TESTS
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -7,6 +7,8 @@
+ obj-$(CONFIG_MTD) += mtd.o
+ mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
+
++obj-$(CONFIG_MTD_SPLIT) += mtdsplit/
++
+ obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
+ obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
+ obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
diff --git a/target/linux/generic/pending-4.14/404-mtd-add-more-helper-functions.patch b/target/linux/generic/pending-4.14/404-mtd-add-more-helper-functions.patch
new file mode 100644
index 0000000..97bd773
--- /dev/null
+++ b/target/linux/generic/pending-4.14/404-mtd-add-more-helper-functions.patch
@@ -0,0 +1,94 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: kernel/3.10: add separate rootfs partition parser
+
+lede-commit: daec7ad7688415156e2730e401503d09bd3acf91
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/mtdpart.c | 29 +++++++++++++++++++++++++++++
+ include/linux/mtd/mtd.h | 18 ++++++++++++++++++
+ include/linux/mtd/partitions.h | 2 ++
+ 3 files changed, 49 insertions(+)
+
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -799,6 +799,17 @@ run_parsers_by_type(struct mtd_part *sla
+ return nr_parts;
+ }
+
++static inline unsigned long
++mtd_pad_erasesize(struct mtd_info *mtd, int offset, int len)
++{
++ unsigned long mask = mtd->erasesize - 1;
++
++ len += offset & mask;
++ len = (len + mask) & ~mask;
++ len -= offset & mask;
++ return len;
++}
++
+ #ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
+ #define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
+ #else
+@@ -1144,6 +1155,24 @@ int mtd_is_partition(const struct mtd_in
+ }
+ EXPORT_SYMBOL_GPL(mtd_is_partition);
+
++struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd)
++{
++ if (!mtd_is_partition(mtd))
++ return (struct mtd_info *)mtd;
++
++ return mtd_to_part(mtd)->parent;
++}
++EXPORT_SYMBOL_GPL(mtdpart_get_master);
++
++uint64_t mtdpart_get_offset(const struct mtd_info *mtd)
++{
++ if (!mtd_is_partition(mtd))
++ return 0;
++
++ return mtd_to_part(mtd)->offset;
++}
++EXPORT_SYMBOL_GPL(mtdpart_get_offset);
++
+ /* Returns the size of the entire flash chip */
+ uint64_t mtd_get_device_size(const struct mtd_info *mtd)
+ {
+--- a/include/linux/mtd/mtd.h
++++ b/include/linux/mtd/mtd.h
+@@ -493,6 +493,24 @@ static inline uint32_t mtd_mod_by_eb(uin
+ return do_div(sz, mtd->erasesize);
+ }
+
++static inline uint64_t mtd_roundup_to_eb(uint64_t sz, struct mtd_info *mtd)
++{
++ if (mtd_mod_by_eb(sz, mtd) == 0)
++ return sz;
++
++ /* Round up to next erase block */
++ return (mtd_div_by_eb(sz, mtd) + 1) * mtd->erasesize;
++}
++
++static inline uint64_t mtd_rounddown_to_eb(uint64_t sz, struct mtd_info *mtd)
++{
++ if (mtd_mod_by_eb(sz, mtd) == 0)
++ return sz;
++
++ /* Round down to the start of the current erase block */
++ return (mtd_div_by_eb(sz, mtd)) * mtd->erasesize;
++}
++
+ static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
+ {
+ if (mtd->writesize_shift)
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -114,6 +114,8 @@ int mtd_is_partition(const struct mtd_in
+ int mtd_add_partition(struct mtd_info *master, const char *name,
+ long long offset, long long length);
+ int mtd_del_partition(struct mtd_info *master, int partno);
++struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd);
++uint64_t mtdpart_get_offset(const struct mtd_info *mtd);
+ uint64_t mtd_get_device_size(const struct mtd_info *mtd);
+ extern void __weak arch_split_mtd_part(struct mtd_info *master,
+ const char *name, int offset, int size);
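
The rounding helpers added above can be sanity-checked with a standalone mock
that hard-codes a 64 KiB erase block; plain uint64_t arithmetic stands in for
the struct mtd_info based variants:

#include <stdint.h>
#include <stdio.h>

#define ERASESIZE 0x10000ULL

static uint64_t roundup_to_eb(uint64_t sz)
{
	return (sz % ERASESIZE) ? (sz / ERASESIZE + 1) * ERASESIZE : sz;
}

static uint64_t pad_erasesize(uint64_t offset, uint64_t len)
{
	uint64_t mask = ERASESIZE - 1;

	len += offset & mask;
	len = (len + mask) & ~mask;
	len -= offset & mask;
	return len;
}

int main(void)
{
	/* 0x12345 is not block aligned -> rounded up to 0x20000 */
	printf("roundup: 0x%llx\n", (unsigned long long)roundup_to_eb(0x12345));

	/* a 0xc000-byte write at offset 0x8000 touches the blocks at
	 * 0x00000 and 0x10000, so the padded length from that offset
	 * reaches 0x20000, i.e. 0x18000 bytes */
	printf("pad:     0x%llx\n", (unsigned long long)pad_erasesize(0x8000, 0xc000));
	return 0;
}
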
diff --git a/target/linux/generic/pending-4.14/411-mtd-partial_eraseblock_write.patch b/target/linux/generic/pending-4.14/411-mtd-partial_eraseblock_write.patch
new file mode 100644
index 0000000..096d6a4
--- /dev/null
+++ b/target/linux/generic/pending-4.14/411-mtd-partial_eraseblock_write.patch
@@ -0,0 +1,154 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: mtd: implement write support for partitions covering only a part of an eraseblock (buffer data that would otherwise be erased)
+
+lede-commit: 87a8e8ac1067f58ba831c4aae443f3655c31cd80
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/mtdpart.c | 90 ++++++++++++++++++++++++++++++++++++++++++++-----
+ include/linux/mtd/mtd.h | 4 +++
+ 2 files changed, 85 insertions(+), 9 deletions(-)
+
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -36,6 +36,8 @@
+ #include "mtdcore.h"
+ #include "mtdsplit/mtdsplit.h"
+
++#define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */
++
+ /* Our partition linked list */
+ static LIST_HEAD(mtd_partitions);
+ static DEFINE_MUTEX(mtd_partitions_mutex);
+@@ -241,13 +243,61 @@ static int part_erase(struct mtd_info *m
+ struct mtd_part *part = mtd_to_part(mtd);
+ int ret;
+
++
++ instr->partial_start = false;
++ if (mtd->flags & MTD_ERASE_PARTIAL) {
++ size_t readlen = 0;
++ u64 mtd_ofs;
++
++ instr->erase_buf = kmalloc(part->parent->erasesize, GFP_ATOMIC);
++ if (!instr->erase_buf)
++ return -ENOMEM;
++
++ mtd_ofs = part->offset + instr->addr;
++ instr->erase_buf_ofs = do_div(mtd_ofs, part->parent->erasesize);
++
++ if (instr->erase_buf_ofs > 0) {
++ instr->addr -= instr->erase_buf_ofs;
++ ret = mtd_read(part->parent,
++ instr->addr + part->offset,
++ part->parent->erasesize,
++ &readlen, instr->erase_buf);
++
++ instr->len += instr->erase_buf_ofs;
++ instr->partial_start = true;
++ } else {
++ mtd_ofs = part->offset + part->mtd.size;
++ instr->erase_buf_ofs = part->parent->erasesize -
++ do_div(mtd_ofs, part->parent->erasesize);
++
++ if (instr->erase_buf_ofs > 0) {
++ instr->len += instr->erase_buf_ofs;
++ ret = mtd_read(part->parent,
++ part->offset + instr->addr +
++ instr->len - part->parent->erasesize,
++ part->parent->erasesize, &readlen,
++ instr->erase_buf);
++ } else {
++ ret = 0;
++ }
++ }
++ if (ret < 0) {
++ kfree(instr->erase_buf);
++ return ret;
++ }
++
++ }
++
+ instr->addr += part->offset;
+ ret = part->parent->_erase(part->parent, instr);
+ if (ret) {
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= part->offset;
+ instr->addr -= part->offset;
++ if (mtd->flags & MTD_ERASE_PARTIAL)
++ kfree(instr->erase_buf);
+ }
++
+ return ret;
+ }
+
+@@ -255,6 +305,25 @@ void mtd_erase_callback(struct erase_inf
+ {
+ if (instr->mtd->_erase == part_erase) {
+ struct mtd_part *part = mtd_to_part(instr->mtd);
++ size_t wrlen = 0;
++
++ if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
++ if (instr->partial_start) {
++ part->parent->_write(part->parent,
++ instr->addr, instr->erase_buf_ofs,
++ &wrlen, instr->erase_buf);
++ instr->addr += instr->erase_buf_ofs;
++ } else {
++ instr->len -= instr->erase_buf_ofs;
++ part->parent->_write(part->parent,
++ instr->addr + instr->len,
++ instr->erase_buf_ofs, &wrlen,
++ instr->erase_buf +
++ part->parent->erasesize -
++ instr->erase_buf_ofs);
++ }
++ kfree(instr->erase_buf);
++ }
+
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= part->offset;
+@@ -598,19 +667,22 @@ static struct mtd_part *allocate_partiti
+ remainder = do_div(tmp, wr_alignment);
+ if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
+ /* Doesn't start on a boundary of major erase size */
+- /* FIXME: Let it be writable if it is on a boundary of
+- * _minor_ erase size though */
+- slave->mtd.flags &= ~MTD_WRITEABLE;
+- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
+- part->name);
++ slave->mtd.flags |= MTD_ERASE_PARTIAL;
++ if (((u32)slave->mtd.size) > parent->erasesize)
++ slave->mtd.flags &= ~MTD_WRITEABLE;
++ else
++ slave->mtd.erasesize = slave->mtd.size;
+ }
+
+- tmp = slave->mtd.size;
++ tmp = slave->offset + slave->mtd.size;
+ remainder = do_div(tmp, wr_alignment);
+ if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
+- slave->mtd.flags &= ~MTD_WRITEABLE;
+- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
+- part->name);
++ slave->mtd.flags |= MTD_ERASE_PARTIAL;
++
++ if ((u32)slave->mtd.size > parent->erasesize)
++ slave->mtd.flags &= ~MTD_WRITEABLE;
++ else
++ slave->mtd.erasesize = slave->mtd.size;
+ }
+
+ mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
+--- a/include/linux/mtd/mtd.h
++++ b/include/linux/mtd/mtd.h
+@@ -56,6 +56,10 @@ struct erase_info {
+ u_long priv;
+ u_char state;
+ struct erase_info *next;
++
++ u8 *erase_buf;
++ u32 erase_buf_ofs;
++ bool partial_start;
+ };
+
+ struct mtd_erase_region_info {
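The core of the change above is a read-modify-erase-write cycle on the parent eraseblock whenever a partition boundary falls inside it. The stand-alone sketch below shows that cycle in its simplest form, assuming a synchronous erase and only the head-of-block case; the function name is invented and this is not code from the patch, which additionally handles the partition tail and the erase callback.

#include <linux/mtd/mtd.h>
#include <linux/slab.h>

/*
 * Sketch only: preserve the first `keep` bytes of the parent eraseblock
 * at `block` across an erase. The real patch also handles the partition
 * tail and restores the data from mtd_erase_callback().
 */
static int partial_erase_sketch(struct mtd_info *parent, loff_t block,
				size_t keep)
{
	struct erase_info ei = {
		.mtd  = parent,
		.addr = block,
		.len  = parent->erasesize,
	};
	size_t retlen;
	u8 *buf;
	int ret;

	buf = kmalloc(parent->erasesize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* 1. save the data that the erase would otherwise destroy */
	ret = mtd_read(parent, block, keep, &retlen, buf);
	if (ret)
		goto out;

	/* 2. erase the full parent eraseblock */
	ret = mtd_erase(parent, &ei);
	if (ret)
		goto out;

	/* 3. write the preserved head of the block back */
	ret = mtd_write(parent, block, keep, &retlen, buf);
out:
	kfree(buf);
	return ret;
}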
diff --git a/target/linux/generic/pending-4.14/412-mtd-partial_eraseblock_unlock.patch b/target/linux/generic/pending-4.14/412-mtd-partial_eraseblock_unlock.patch
new file mode 100644
index 0000000..fd6ffc6
--- /dev/null
+++ b/target/linux/generic/pending-4.14/412-mtd-partial_eraseblock_unlock.patch
@@ -0,0 +1,40 @@
+From: Tim Harvey <tharvey@gateworks.com>
+Subject: mtd: allow partial block unlock
+
+This allows sysupgrade for devices such as the Gateworks Avila/Cambria
+product families based on the ixp4xx using the redboot bootloader with
+combined FIS directory and RedBoot config partitions on larger FLASH
+devices with larger eraseblocks.
+
+This second iteration of the patch addresses previous issues:
+- whitespace breakage fixed
+- unlock in all scenarios
+- simplified the logic and fixed a logic bug
+
+[john@phrozen.org: this should be moved to the ixp4xx folder]
+
+Signed-off-by: Tim Harvey <tharvey@gateworks.com>
+---
+ drivers/mtd/mtdpart.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -343,7 +343,16 @@ static int part_lock(struct mtd_info *mt
+ static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+ {
+ struct mtd_part *part = mtd_to_part(mtd);
+- return part->parent->_unlock(part->parent, ofs + part->offset, len);
++
++ ofs += part->offset;
++
++ if (mtd->flags & MTD_ERASE_PARTIAL) {
++ /* round up len to next erasesize and round down offset to prev block */
++ len = (mtd_div_by_eb(len, part->parent) + 1) * part->parent->erasesize;
++ ofs &= ~(part->parent->erasesize - 1);
++ }
++
++ return part->parent->_unlock(part->parent, ofs, len);
+ }
+
+ static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
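The rounding in part_unlock() above is easier to see with numbers. The sketch below mirrors the arithmetic with plain 64-bit math; the helper name and the example values are invented, and the real code uses mtd_div_by_eb() on the parent device.

#include <linux/math64.h>
#include <linux/types.h>

/*
 * Sketch of the MTD_ERASE_PARTIAL rounding in part_unlock(), with a
 * worked example in the comments. Values are illustrative only.
 */
static void unlock_rounding_sketch(u64 erasesize, u64 *ofs, u64 *len)
{
	/* Example: erasesize = 0x20000, *ofs = 0x21000, *len = 0x3000 */

	/* round len up to whole eraseblocks: 0x3000 -> 0x20000 */
	*len = (div64_u64(*len, erasesize) + 1) * erasesize;

	/* round ofs down to an eraseblock boundary: 0x21000 -> 0x20000 */
	*ofs &= ~(erasesize - 1);

	/*
	 * The unlock now spans 0x20000..0x40000 on the parent device,
	 * covering the whole eraseblock touched by the partial-partition
	 * range 0x21000..0x24000.
	 */
}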
diff --git a/target/linux/generic/pending-4.14/420-mtd-redboot_space.patch b/target/linux/generic/pending-4.14/420-mtd-redboot_space.patch
new file mode 100644
index 0000000..85fbe05
--- /dev/null
+++ b/target/linux/generic/pending-4.14/420-mtd-redboot_space.patch
@@ -0,0 +1,41 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: add patch for including unpartitioned space in the rootfs partition for redboot devices (if applicable)
+
+[john@phrozen.org: used by ixp and others]
+
+lede-commit: 394918851f84e4d00fa16eb900e7700e95091f00
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/redboot.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+--- a/drivers/mtd/redboot.c
++++ b/drivers/mtd/redboot.c
+@@ -265,14 +265,21 @@ static int parse_redboot_partitions(stru
+ #endif
+ names += strlen(names)+1;
+
+-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
+- i++;
+- parts[i].offset = parts[i-1].size + parts[i-1].offset;
+- parts[i].size = fl->next->img->flash_base - parts[i].offset;
+- parts[i].name = nullname;
+- }
++ if (!strcmp(parts[i].name, "rootfs")) {
++ parts[i].size = fl->next->img->flash_base;
++ parts[i].size &= ~(master->erasesize - 1);
++ parts[i].size -= parts[i].offset;
++#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
++ nrparts--;
++ } else {
++ i++;
++ parts[i].offset = parts[i-1].size + parts[i-1].offset;
++ parts[i].size = fl->next->img->flash_base - parts[i].offset;
++ parts[i].name = nullname;
+ #endif
++ }
++ }
+ tmp_fl = fl;
+ fl = fl->next;
+ kfree(tmp_fl);
diff --git a/target/linux/generic/pending-4.14/430-mtd-add-myloader-partition-parser.patch b/target/linux/generic/pending-4.14/430-mtd-add-myloader-partition-parser.patch
new file mode 100644
index 0000000..889c452
--- /dev/null
+++ b/target/linux/generic/pending-4.14/430-mtd-add-myloader-partition-parser.patch
@@ -0,0 +1,47 @@
+From: Florian Fainelli <f.fainelli@gmail.com>
+Subject: Add myloader partition table parser
+
+[john@phrozen.org: should be upstreamable]
+
+lede-commit: d8bf22859b51faa09d22c056fe221a45d2f7a3b8
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+---
+ drivers/mtd/Kconfig | 16 ++++++++++++++++
+ drivers/mtd/Makefile | 1 +
+ 2 files changed, 17 insertions(+)
+
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -178,6 +178,22 @@ menu "Partition parsers"
+ source "drivers/mtd/parsers/Kconfig"
+ endmenu
+
++config MTD_MYLOADER_PARTS
++ tristate "MyLoader partition parsing"
++ depends on ADM5120 || ATH25 || ATH79
++ ---help---
++ MyLoader is a bootloader which allows the user to define partitions
++ in flash devices, by putting a table in the second erase block
++ on the device, similar to a partition table. This table gives the
++ offsets and lengths of the user defined partitions.
++
++ If you need code which can detect and parse these tables, and
++ register MTD 'partitions' corresponding to each image detected,
++ enable this option.
++
++ You will still need the parsing functions to be called by the driver
++ for your particular device. It won't happen automatically.
++
+ comment "User Modules And Translation Layers"
+
+ #
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -16,6 +16,7 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+ obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
+ obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
+ obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
++obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o
+ obj-y += parsers/
+
+ # 'Users' - code which presents functionality to userspace.
diff --git a/target/linux/generic/pending-4.14/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch b/target/linux/generic/pending-4.14/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch
new file mode 100644
index 0000000..9299e88
--- /dev/null
+++ b/target/linux/generic/pending-4.14/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch
@@ -0,0 +1,67 @@
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com>
+Subject: [PATCH] mtd: bcm47xxpart: check for bad blocks when calculating offsets
+
+Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
+---
+
+--- a/drivers/mtd/parsers/parser_trx.c
++++ b/drivers/mtd/parsers/parser_trx.c
+@@ -29,6 +29,33 @@ struct trx_header {
+ uint32_t offset[3];
+ } __packed;
+
++/*
++ * Calculate real end offset (address) for a given amount of data. It checks
++ * all blocks skipping bad ones.
++ */
++static size_t parser_trx_real_offset(struct mtd_info *mtd, size_t bytes)
++{
++ size_t real_offset = 0;
++
++ if (mtd_block_isbad(mtd, real_offset))
++ pr_warn("Base offset shouldn't be at bad block");
++
++ while (bytes >= mtd->erasesize) {
++ bytes -= mtd->erasesize;
++ real_offset += mtd->erasesize;
++ while (mtd_block_isbad(mtd, real_offset)) {
++ real_offset += mtd->erasesize;
++
++ if (real_offset >= mtd->size)
++ return real_offset - mtd->erasesize;
++ }
++ }
++
++ real_offset += bytes;
++
++ return real_offset;
++}
++
+ static const char *parser_trx_data_part_name(struct mtd_info *master,
+ size_t offset)
+ {
+@@ -83,21 +110,21 @@ static int parser_trx_parse(struct mtd_i
+ if (trx.offset[2]) {
+ part = &parts[curr_part++];
+ part->name = "loader";
+- part->offset = trx.offset[i];
++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]);
+ i++;
+ }
+
+ if (trx.offset[i]) {
+ part = &parts[curr_part++];
+ part->name = "linux";
+- part->offset = trx.offset[i];
++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]);
+ i++;
+ }
+
+ if (trx.offset[i]) {
+ part = &parts[curr_part++];
+ part->name = parser_trx_data_part_name(mtd, trx.offset[i]);
+- part->offset = trx.offset[i];
++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]);
+ i++;
+ }
+
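The bad-block walk above is easiest to follow with concrete numbers. The following stand-alone program re-implements the same loop over an invented bad-block map (the block size, the map and the offsets are purely illustrative, and the end-of-device handling is simplified) to show how a TRX header offset is translated to a real flash offset.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-alone illustration of the offset walk in parser_trx_real_offset(). */
#define ERASESIZE 0x20000u	/* pretend 128 KiB eraseblocks */

static const bool bad[] = { false, false, true, false, false, false };
#define NBLOCKS (sizeof(bad) / sizeof(bad[0]))

static size_t real_offset(size_t bytes)
{
	size_t off = 0;

	while (bytes >= ERASESIZE) {
		bytes -= ERASESIZE;
		off += ERASESIZE;
		/* skip over bad eraseblocks */
		while (off / ERASESIZE < NBLOCKS && bad[off / ERASESIZE])
			off += ERASESIZE;
	}
	return off + bytes;
}

int main(void)
{
	/*
	 * A TRX offset of 0x50000 is two full blocks plus 0x10000.
	 * Block 2 is bad, so the data actually starts one block later:
	 * the translated offset is 0x70000.
	 */
	printf("0x50000 -> 0x%zx\n", real_offset(0x50000));
	return 0;
}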
diff --git a/target/linux/generic/pending-4.14/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch b/target/linux/generic/pending-4.14/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch
new file mode 100644
index 0000000..a6d0828
--- /dev/null
+++ b/target/linux/generic/pending-4.14/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch
@@ -0,0 +1,37 @@
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com>
+Subject: mtd: bcm47xxpart: detect T_Meter partition
+
+It can be found on many Netgear devices. It consists of many 0x30-byte blocks,
+each starting with the bytes 4D 54 ("MT").
+
+Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
+---
+ drivers/mtd/bcm47xxpart.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/mtd/bcm47xxpart.c
++++ b/drivers/mtd/bcm47xxpart.c
+@@ -39,6 +39,7 @@
+ #define NVRAM_HEADER 0x48534C46 /* FLSH */
+ #define POT_MAGIC1 0x54544f50 /* POTT */
+ #define POT_MAGIC2 0x504f /* OP */
++#define T_METER_MAGIC 0x4D540000 /* MT */
+ #define ML_MAGIC1 0x39685a42
+ #define ML_MAGIC2 0x26594131
+ #define TRX_MAGIC 0x30524448
+@@ -182,6 +183,15 @@ static int bcm47xxpart_parse(struct mtd_
+ MTD_WRITEABLE);
+ continue;
+ }
++
++ /* T_Meter */
++ if ((le32_to_cpu(buf[0x000 / 4]) & 0xFFFF0000) == T_METER_MAGIC &&
++ (le32_to_cpu(buf[0x030 / 4]) & 0xFFFF0000) == T_METER_MAGIC &&
++ (le32_to_cpu(buf[0x060 / 4]) & 0xFFFF0000) == T_METER_MAGIC) {
++ bcm47xxpart_add_part(&parts[curr_part++], "T_Meter", offset,
++ MTD_WRITEABLE);
++ continue;
++ }
+
+ /* TRX */
+ if (buf[0x000 / 4] == TRX_MAGIC) {
diff --git a/target/linux/generic/pending-4.14/440-block2mtd_init.patch b/target/linux/generic/pending-4.14/440-block2mtd_init.patch
new file mode 100644
index 0000000..8834788
--- /dev/null
+++ b/target/linux/generic/pending-4.14/440-block2mtd_init.patch
@@ -0,0 +1,116 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: block2mtd
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/devices/block2mtd.c | 30 ++++++++++++++++++++----------
+ 1 file changed, 20 insertions(+), 10 deletions(-)
+
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -26,6 +26,7 @@
+ #include <linux/list.h>
+ #include <linux/init.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
+ #include <linux/mutex.h>
+ #include <linux/mount.h>
+ #include <linux/slab.h>
+@@ -219,7 +220,7 @@ static void block2mtd_free_device(struct
+
+
+ static struct block2mtd_dev *add_device(char *devname, int erase_size,
+- int timeout)
++ const char *mtdname, int timeout)
+ {
+ #ifndef MODULE
+ int i;
+@@ -227,6 +228,7 @@ static struct block2mtd_dev *add_device(
+ const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
+ struct block_device *bdev = ERR_PTR(-ENODEV);
+ struct block2mtd_dev *dev;
++ struct mtd_partition *part;
+ char *name;
+
+ if (!devname)
+@@ -283,13 +285,16 @@ static struct block2mtd_dev *add_device(
+
+ /* Setup the MTD structure */
+ /* make the name contain the block device in */
+- name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
++ if (!mtdname)
++ mtdname = devname;
++ name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
+ if (!name)
+ goto err_destroy_mutex;
+
++ strcpy(name, mtdname);
+ dev->mtd.name = name;
+
+- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
++ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
+ dev->mtd.erasesize = erase_size;
+ dev->mtd.writesize = 1;
+ dev->mtd.writebufsize = PAGE_SIZE;
+@@ -302,7 +307,11 @@ static struct block2mtd_dev *add_device(
+ dev->mtd.priv = dev;
+ dev->mtd.owner = THIS_MODULE;
+
+- if (mtd_device_register(&dev->mtd, NULL, 0)) {
++ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
++ part->name = name;
++ part->offset = 0;
++ part->size = dev->mtd.size;
++ if (mtd_device_register(&dev->mtd, part, 1)) {
+ /* Device didn't get added, so free the entry */
+ goto err_destroy_mutex;
+ }
+@@ -310,8 +319,7 @@ static struct block2mtd_dev *add_device(
+ list_add(&dev->list, &blkmtd_device_list);
+ pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
+ dev->mtd.index,
+- dev->mtd.name + strlen("block2mtd: "),
+- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
++ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ return dev;
+
+ err_destroy_mutex:
+@@ -384,7 +392,7 @@ static int block2mtd_setup2(const char *
+ /* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
+ char buf[80 + 12 + 80 + 8];
+ char *str = buf;
+- char *token[2];
++ char *token[3];
+ char *name;
+ size_t erase_size = PAGE_SIZE;
+ unsigned long timeout = MTD_DEFAULT_TIMEOUT;
+@@ -398,7 +406,7 @@ static int block2mtd_setup2(const char *
+ strcpy(str, val);
+ kill_final_newline(str);
+
+- for (i = 0; i < 2; i++)
++ for (i = 0; i < 3; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str) {
+@@ -424,8 +432,10 @@ static int block2mtd_setup2(const char *
+ return 0;
+ }
+ }
++ if (token[2] && (strlen(token[2]) + 1 > 80))
++ pr_err("mtd device name too long\n");
+
+- add_device(name, erase_size, timeout);
++ add_device(name, erase_size, token[2], timeout);
+
+ return 0;
+ }
+@@ -459,7 +469,7 @@ static int block2mtd_setup(const char *v
+
+
+ module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
+-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
++MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
+
+ static int __init block2mtd_init(void)
+ {
diff --git a/target/linux/generic/pending-4.14/441-block2mtd_probe.patch b/target/linux/generic/pending-4.14/441-block2mtd_probe.patch
new file mode 100644
index 0000000..fee970a
--- /dev/null
+++ b/target/linux/generic/pending-4.14/441-block2mtd_probe.patch
@@ -0,0 +1,47 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: block2mtd
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/devices/block2mtd.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -392,7 +392,7 @@ static int block2mtd_setup2(const char *
+ /* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
+ char buf[80 + 12 + 80 + 8];
+ char *str = buf;
+- char *token[3];
++ char *token[4];
+ char *name;
+ size_t erase_size = PAGE_SIZE;
+ unsigned long timeout = MTD_DEFAULT_TIMEOUT;
+@@ -406,7 +406,7 @@ static int block2mtd_setup2(const char *
+ strcpy(str, val);
+ kill_final_newline(str);
+
+- for (i = 0; i < 3; i++)
++ for (i = 0; i < 4; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str) {
+@@ -435,6 +435,9 @@ static int block2mtd_setup2(const char *
+ if (token[2] && (strlen(token[2]) + 1 > 80))
+ pr_err("mtd device name too long\n");
+
++ if (token[3] && kstrtoul(token[3], 0, &timeout))
++ pr_err("invalid timeout\n");
++
+ add_device(name, erase_size, token[2], timeout);
+
+ return 0;
+@@ -469,7 +472,7 @@ static int block2mtd_setup(const char *v
+
+
+ module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
+-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
++MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>[,<timeout>]]]\"");
+
+ static int __init block2mtd_init(void)
+ {
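Taken together, the two block2mtd patches extend the module parameter from "<dev>[,<erasesize>]" to "<dev>[,<erasesize>[,<name>[,<timeout>]]]". A hypothetical invocation (device path, name and values are invented for illustration) would be

    block2mtd.block2mtd=/dev/loop0,65536,testdata,20

on the kernel command line with a built-in driver, or

    modprobe block2mtd block2mtd=/dev/loop0,65536,testdata,20

when built as a module. This registers an MTD device named "testdata" with 64 KiB eraseblocks on top of /dev/loop0; the trailing value is passed through to add_device() as the existing timeout argument, whose semantics the patches do not change.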
diff --git a/target/linux/generic/pending-4.14/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch b/target/linux/generic/pending-4.14/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch
new file mode 100644
index 0000000..41f9d31
--- /dev/null
+++ b/target/linux/generic/pending-4.14/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch
@@ -0,0 +1,25 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: disable cfi cmdset 0002 erase suspend
+
+On some platforms, erase suspend leads to data corruption and lockups when write
+ops collide with erase ops. This has been observed on the Buffalo WZR-HP-G300NH.
+Rather than play whack-a-mole with a hard-to-reproduce issue on a variety of devices,
+simply disable erase suspend, as it will usually not produce any useful gain on
+the small filesystems used on embedded hardware.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -811,7 +811,7 @@ static int get_chip(struct map_info *map
+ return 0;
+
+ case FL_ERASING:
+- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
++ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ !(mode == FL_READY || mode == FL_POINT ||
+ (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
+ goto sleep;
diff --git a/target/linux/generic/pending-4.14/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch b/target/linux/generic/pending-4.14/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch
new file mode 100644
index 0000000..f4ba5b7
--- /dev/null
+++ b/target/linux/generic/pending-4.14/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch
@@ -0,0 +1,17 @@
+From: George Kashperko <george@znau.edu.ua>
+Subject: Issue map read after Write Buffer Load command to ensure chip is ready to receive data.
+
+Signed-off-by: George Kashperko <george@znau.edu.ua>
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 1 +
+ 1 file changed, 1 insertion(+)
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -1832,6 +1832,7 @@ static int __xipram do_write_buffer(stru
+
+ /* Write Buffer Load */
+ map_write(map, CMD(0x25), cmd_adr);
++ (void) map_read(map, cmd_adr);
+
+ chip->state = FL_WRITING_TO_BUFFER;
+
diff --git a/target/linux/generic/pending-4.14/465-m25p80-mx-disable-software-protection.patch b/target/linux/generic/pending-4.14/465-m25p80-mx-disable-software-protection.patch
new file mode 100644
index 0000000..89d38ec
--- /dev/null
+++ b/target/linux/generic/pending-4.14/465-m25p80-mx-disable-software-protection.patch
@@ -0,0 +1,18 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: Disable software protection bits for Macronix flashes.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -2715,6 +2715,7 @@ int spi_nor_scan(struct spi_nor *nor, co
+
+ if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(info) == SNOR_MFR_INTEL ||
++ JEDEC_MFR(info) == SNOR_MFR_MACRONIX ||
+ JEDEC_MFR(info) == SNOR_MFR_SST ||
+ info->flags & SPI_NOR_HAS_LOCK) {
+ write_enable(nor);
diff --git a/target/linux/generic/pending-4.14/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch b/target/linux/generic/pending-4.14/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch
new file mode 100644
index 0000000..10db289
--- /dev/null
+++ b/target/linux/generic/pending-4.14/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch
@@ -0,0 +1,56 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 4 Nov 2017 07:40:23 +0100
+Subject: [PATCH] mtd: spi-nor: support limiting 4K sectors support based on
+ flash size
+
+Some devices need 4K sectors to be able to deal with small flash chips.
+For instance, w25x05 is 64 KiB in size, and without 4K sectors, the
+entire chip is just one erase block.
+On bigger flash chip sizes, using 4K sectors can significantly slow down
+many operations, including using a writable filesystem. There are several
+platforms where it makes sense to use a single kernel on both kinds of
+devices.
+
+To support this properly, allow configuring an upper flash chip size
+limit for 4K sectors support.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/mtd/spi-nor/Kconfig
++++ b/drivers/mtd/spi-nor/Kconfig
+@@ -39,6 +39,17 @@ config SPI_ASPEED_SMC
+ and support for the SPI flash memory controller (SPI) for
+ the host firmware. The implementation only supports SPI NOR.
+
++config MTD_SPI_NOR_USE_4K_SECTORS_LIMIT
++ int "Maximum flash chip size to use 4K sectors on (in KiB)"
++ depends on MTD_SPI_NOR_USE_4K_SECTORS
++ default "4096"
++ help
++ There are many flash chips that support 4K sectors, but are so large
++ that using them significantly slows down writing large amounts of
++ data or using a writable filesystem.
++ Any flash chip larger than the size specified in this option will
++ not use 4K sectors.
++
+ config SPI_ATMEL_QUADSPI
+ tristate "Atmel Quad SPI Controller"
+ depends on ARCH_AT91 || (ARM && COMPILE_TEST)
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -2562,10 +2562,12 @@ static int spi_nor_select_erase(struct s
+
+ #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+- if (info->flags & SECT_4K) {
++ if ((info->flags & SECT_4K) && (mtd->size <=
++ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT * 1024)) {
+ nor->erase_opcode = SPINOR_OP_BE_4K;
+ mtd->erasesize = 4096;
+- } else if (info->flags & SECT_4K_PMC) {
++ } else if ((info->flags & SECT_4K_PMC) && (mtd->size <=
++ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT * 1024)) {
+ nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
+ mtd->erasesize = 4096;
+ } else
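The new limit is meant to be set from the target kernel configuration. A possible .config fragment (the limit shown is simply the default from the Kconfig entry above):

    CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
    CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT=4096

With these values a small chip such as the 64 KiB w25x05 keeps its 4 KiB erase sectors, while any SECT_4K chip larger than 4096 KiB falls back to its uniform erase size, matching the size comparison added to spi_nor_select_erase().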
diff --git a/target/linux/generic/pending-4.14/476-mtd-spi-nor-add-eon-en25q128.patch b/target/linux/generic/pending-4.14/476-mtd-spi-nor-add-eon-en25q128.patch
new file mode 100644
index 0000000..ac1fda5
--- /dev/null
+++ b/target/linux/generic/pending-4.14/476-mtd-spi-nor-add-eon-en25q128.patch
@@ -0,0 +1,18 @@
+From: Piotr Dymacz <pepe2k@gmail.com>
+Subject: kernel/mtd: add support for EON EN25Q128
+
+Signed-off-by: Piotr Dymacz <pepe2k@gmail.com>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -954,6 +954,7 @@ static const struct flash_info spi_nor_i
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
++ { "en25q128", INFO(0x1c3018, 0, 64 * 1024, 256, SECT_4K) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
diff --git a/target/linux/generic/pending-4.14/477-mtd-add-spi-nor-add-mx25u3235f.patch b/target/linux/generic/pending-4.14/477-mtd-add-spi-nor-add-mx25u3235f.patch
new file mode 100644
index 0000000..191e01d
--- /dev/null
+++ b/target/linux/generic/pending-4.14/477-mtd-add-spi-nor-add-mx25u3235f.patch
@@ -0,0 +1,18 @@
+From: André Valentin <avalentin@marcant.net>
+Subject: linux/mtd: add id for mx25u3235f needed by ZyXEL NBG6817
+
+Signed-off-by: André Valentin <avalentin@marcant.net>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1016,6 +1016,7 @@ static const struct flash_info spi_nor_i
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
++ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64, 0) },
+ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
diff --git a/target/linux/generic/pending-4.14/480-mtd-set-rootfs-to-be-root-dev.patch b/target/linux/generic/pending-4.14/480-mtd-set-rootfs-to-be-root-dev.patch
new file mode 100644
index 0000000..6cddaf0
--- /dev/null
+++ b/target/linux/generic/pending-4.14/480-mtd-set-rootfs-to-be-root-dev.patch
@@ -0,0 +1,38 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: kernel/3.1[02]: move MTD root device setup code to mtdcore
+
+The current code can only set the root device automatically
+on MTD partitions. Move the code to the MTD core so that it
+can be used with all MTD devices.
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/mtdcore.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -41,6 +41,7 @@
+ #include <linux/reboot.h>
+ #include <linux/leds.h>
+ #include <linux/debugfs.h>
++#include <linux/root_dev.h>
+
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
+@@ -578,6 +579,15 @@ int add_mtd_device(struct mtd_info *mtd)
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ __module_get(THIS_MODULE);
++
++ if (!strcmp(mtd->name, "rootfs") &&
++ IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
++ ROOT_DEV == 0) {
++ pr_notice("mtd: device %d (%s) set to be root filesystem\n",
++ mtd->index, mtd->name);
++ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
++ }
++
+ return 0;
+
+ fail_added:
diff --git a/target/linux/generic/pending-4.14/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch b/target/linux/generic/pending-4.14/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch
new file mode 100644
index 0000000..523a69c
--- /dev/null
+++ b/target/linux/generic/pending-4.14/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch
@@ -0,0 +1,73 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: ubi: auto-attach mtd device named "ubi" or "data" on boot
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/mtd/ubi/build.c | 36 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 36 insertions(+)
+
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -1157,6 +1157,49 @@ static struct mtd_info * __init open_mtd
+ return mtd;
+ }
+
++/*
++ * This function tries attaching mtd partitions named either "ubi" or "data"
++ * during boot.
++ */
++static void __init ubi_auto_attach(void)
++{
++ int err;
++ struct mtd_info *mtd;
++
++ /* try attaching mtd device named "ubi" or "data" */
++ mtd = open_mtd_device("ubi");
++ if (IS_ERR(mtd))
++ mtd = open_mtd_device("data");
++
++ if (!IS_ERR(mtd)) {
++ size_t len;
++ char magic[4];
++
++ /* check for a valid ubi magic */
++ err = mtd_read(mtd, 0, 4, &len, (void *) magic);
++ if (!err && len == 4 && strncmp(magic, "UBI#", 4)) {
++ pr_err("UBI error: no valid UBI magic found inside mtd%d", mtd->index);
++ put_mtd_device(mtd);
++ return;
++ }
++
++ /* auto-add only media types where UBI makes sense */
++ if (mtd->type == MTD_NANDFLASH ||
++ mtd->type == MTD_NORFLASH ||
++ mtd->type == MTD_DATAFLASH ||
++ mtd->type == MTD_MLCNANDFLASH) {
++ mutex_lock(&ubi_devices_mutex);
++ pr_notice("UBI: auto-attach mtd%d\n", mtd->index);
++ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0);
++ mutex_unlock(&ubi_devices_mutex);
++ if (err < 0) {
++ pr_err("UBI error: cannot attach mtd%d", mtd->index);
++ put_mtd_device(mtd);
++ }
++ }
++ }
++}
++
+ static int __init ubi_init(void)
+ {
+ int err, i, k;
+@@ -1240,6 +1283,12 @@ static int __init ubi_init(void)
+ }
+ }
+
++ /* auto-attach mtd devices only if built-in to the kernel and no ubi.mtd
++ * parameter was given */
++ if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
++ !ubi_is_module() && !mtd_devs)
++ ubi_auto_attach();
++
+ err = ubiblock_init();
+ if (err) {
+ pr_err("UBI error: block: cannot initialize, error %d\n", err);
diff --git a/target/linux/generic/pending-4.14/491-ubi-auto-create-ubiblock-device-for-rootfs.patch b/target/linux/generic/pending-4.14/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
new file mode 100644
index 0000000..f8dc1bc
--- /dev/null
+++ b/target/linux/generic/pending-4.14/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
@@ -0,0 +1,66 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: ubi: auto-create ubiblock device for rootfs
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/mtd/ubi/block.c | 42 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -625,6 +625,44 @@ static void __init ubiblock_create_from_
+ }
+ }
+
++#define UBIFS_NODE_MAGIC 0x06101831
++static inline int ubi_vol_is_ubifs(struct ubi_volume_desc *desc)
++{
++ int ret;
++ uint32_t magic_of, magic;
++ ret = ubi_read(desc, 0, (char *)&magic_of, 0, 4);
++ if (ret)
++ return 0;
++ magic = le32_to_cpu(magic_of);
++ return magic == UBIFS_NODE_MAGIC;
++}
++
++static void __init ubiblock_create_auto_rootfs(void)
++{
++ int ubi_num, ret, is_ubifs;
++ struct ubi_volume_desc *desc;
++ struct ubi_volume_info vi;
++
++ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) {
++ desc = ubi_open_volume_nm(ubi_num, "rootfs", UBI_READONLY);
++ if (IS_ERR(desc))
++ continue;
++
++ ubi_get_volume_info(desc, &vi);
++ is_ubifs = ubi_vol_is_ubifs(desc);
++ ubi_close_volume(desc);
++ if (is_ubifs)
++ break;
++
++ ret = ubiblock_create(&vi);
++ if (ret)
++ pr_err("UBI error: block: can't add '%s' volume, err=%d\n",
++ vi.name, ret);
++ /* always break if we get here */
++ break;
++ }
++}
++
+ static void ubiblock_remove_all(void)
+ {
+ struct ubiblock *next;
+@@ -655,6 +693,10 @@ int __init ubiblock_init(void)
+ */
+ ubiblock_create_from_param();
+
++ /* auto-attach "rootfs" volume if existing and non-ubifs */
++ if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV))
++ ubiblock_create_auto_rootfs();
++
+ /*
+ * Block devices are only created upon user requests, so we ignore
+ * existing volumes.
diff --git a/target/linux/generic/pending-4.14/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch b/target/linux/generic/pending-4.14/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch
new file mode 100644
index 0000000..69a35c2
--- /dev/null
+++ b/target/linux/generic/pending-4.14/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch
@@ -0,0 +1,51 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: try auto-mounting ubi0:rootfs in init/do_mounts.c
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ init/do_mounts.c | 26 +++++++++++++++++++++++++-
+ 1 file changed, 25 insertions(+), 1 deletion(-)
+
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -438,7 +438,28 @@ retry:
+ out:
+ put_page(page);
+ }
+-
++
++static int __init mount_ubi_rootfs(void)
++{
++ int flags = MS_SILENT;
++ int err, tried = 0;
++
++ while (tried < 2) {
++ err = do_mount_root("ubi0:rootfs", "ubifs", flags, \
++ root_mount_data);
++ switch (err) {
++ case -EACCES:
++ flags |= MS_RDONLY;
++ tried++;
++ break;
++ default:
++ return err;
++ }
++ }
++
++ return -EINVAL;
++}
++
+ #ifdef CONFIG_ROOT_NFS
+
+ #define NFSROOT_TIMEOUT_MIN 5
+@@ -532,6 +553,10 @@ void __init mount_root(void)
+ change_floppy("root floppy");
+ }
+ #endif
++#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
++ if (!mount_ubi_rootfs())
++ return;
++#endif
+ #ifdef CONFIG_BLOCK
+ {
+ int err = create_dev("/dev/root", ROOT_DEV);
diff --git a/target/linux/generic/pending-4.14/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch b/target/linux/generic/pending-4.14/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch
new file mode 100644
index 0000000..06cd1b6
--- /dev/null
+++ b/target/linux/generic/pending-4.14/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch
@@ -0,0 +1,34 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: ubi: set ROOT_DEV to ubiblock "rootfs" if unset
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/mtd/ubi/block.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -50,6 +50,7 @@
+ #include <linux/scatterlist.h>
+ #include <linux/idr.h>
+ #include <asm/div64.h>
++#include <linux/root_dev.h>
+
+ #include "ubi-media.h"
+ #include "ubi.h"
+@@ -445,6 +446,15 @@ int ubiblock_create(struct ubi_volume_in
+ add_disk(dev->gd);
+ dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
+ dev->ubi_num, dev->vol_id, vi->name);
++
++ if (!strcmp(vi->name, "rootfs") &&
++ IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
++ ROOT_DEV == 0) {
++ pr_notice("ubiblock: device ubiblock%d_%d (%s) set to be root filesystem\n",
++ dev->ubi_num, dev->vol_id, vi->name);
++ ROOT_DEV = MKDEV(gd->major, gd->first_minor);
++ }
++
+ return 0;
+
+ out_free_queue:
diff --git a/target/linux/generic/pending-4.14/494-mtd-ubi-add-EOF-marker-support.patch b/target/linux/generic/pending-4.14/494-mtd-ubi-add-EOF-marker-support.patch
new file mode 100644
index 0000000..99cc0f6
--- /dev/null
+++ b/target/linux/generic/pending-4.14/494-mtd-ubi-add-EOF-marker-support.patch
@@ -0,0 +1,60 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: mtd: add EOF marker support to the UBI layer
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/ubi/attach.c | 25 ++++++++++++++++++++++---
+ drivers/mtd/ubi/ubi.h | 1 +
+ 2 files changed, 23 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -939,6 +939,13 @@ static bool vol_ignored(int vol_id)
+ #endif
+ }
+
++static bool ec_hdr_has_eof(struct ubi_ec_hdr *ech)
++{
++ return ech->padding1[0] == 'E' &&
++ ech->padding1[1] == 'O' &&
++ ech->padding1[2] == 'F';
++}
++
+ /**
+ * scan_peb - scan and process UBI headers of a PEB.
+ * @ubi: UBI device description object
+@@ -971,9 +978,21 @@ static int scan_peb(struct ubi_device *u
+ return 0;
+ }
+
+- err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+- if (err < 0)
+- return err;
++ if (!ai->eof_found) {
++ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
++ if (err < 0)
++ return err;
++
++ if (ec_hdr_has_eof(ech)) {
++ pr_notice("UBI: EOF marker found, PEBs from %d will be erased",
++ pnum);
++ ai->eof_found = true;
++ }
++ }
++
++ if (ai->eof_found)
++ err = UBI_IO_FF_BITFLIPS;
++
+ switch (err) {
+ case 0:
+ break;
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -778,6 +778,7 @@ struct ubi_attach_info {
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
++ bool eof_found;
+ struct kmem_cache *aeb_slab_cache;
+ struct ubi_ec_hdr *ech;
+ struct ubi_vid_io_buf *vidb;
diff --git a/target/linux/generic/pending-4.14/530-jffs2_make_lzma_available.patch b/target/linux/generic/pending-4.14/530-jffs2_make_lzma_available.patch
new file mode 100644
index 0000000..046e9d5
--- /dev/null
+++ b/target/linux/generic/pending-4.14/530-jffs2_make_lzma_available.patch
@@ -0,0 +1,5180 @@
+From: Alexandros C. Couloumbis <alex@ozo.com>
+Subject: fs: add jffs2/lzma support (not activated by default yet)
+
+lede-commit: c2c88d315fa0e881f8b19da07b62859b915b11b2
+Signed-off-by: Alexandros C. Couloumbis <alex@ozo.com>
+---
+ fs/jffs2/Kconfig | 9 +
+ fs/jffs2/Makefile | 3 +
+ fs/jffs2/compr.c | 6 +
+ fs/jffs2/compr.h | 10 +-
+ fs/jffs2/compr_lzma.c | 128 +++
+ fs/jffs2/super.c | 33 +-
+ include/linux/lzma.h | 62 ++
+ include/linux/lzma/LzFind.h | 115 +++
+ include/linux/lzma/LzHash.h | 54 +
+ include/linux/lzma/LzmaDec.h | 231 +++++
+ include/linux/lzma/LzmaEnc.h | 80 ++
+ include/linux/lzma/Types.h | 226 +++++
+ include/uapi/linux/jffs2.h | 1 +
+ lib/Kconfig | 6 +
+ lib/Makefile | 12 +
+ lib/lzma/LzFind.c | 761 ++++++++++++++
+ lib/lzma/LzmaDec.c | 999 +++++++++++++++++++
+ lib/lzma/LzmaEnc.c | 2271 ++++++++++++++++++++++++++++++++++++++++++
+ lib/lzma/Makefile | 7 +
+ 19 files changed, 5008 insertions(+), 6 deletions(-)
+ create mode 100644 fs/jffs2/compr_lzma.c
+ create mode 100644 include/linux/lzma.h
+ create mode 100644 include/linux/lzma/LzFind.h
+ create mode 100644 include/linux/lzma/LzHash.h
+ create mode 100644 include/linux/lzma/LzmaDec.h
+ create mode 100644 include/linux/lzma/LzmaEnc.h
+ create mode 100644 include/linux/lzma/Types.h
+ create mode 100644 lib/lzma/LzFind.c
+ create mode 100644 lib/lzma/LzmaDec.c
+ create mode 100644 lib/lzma/LzmaEnc.c
+ create mode 100644 lib/lzma/Makefile
+
+--- a/fs/jffs2/Kconfig
++++ b/fs/jffs2/Kconfig
+@@ -139,6 +139,15 @@ config JFFS2_LZO
+ This feature was added in July, 2007. Say 'N' if you need
+ compatibility with older bootloaders or kernels.
+
++config JFFS2_LZMA
++ bool "JFFS2 LZMA compression support" if JFFS2_COMPRESSION_OPTIONS
++ select LZMA_COMPRESS
++ select LZMA_DECOMPRESS
++ depends on JFFS2_FS
++ default n
++ help
++ JFFS2 wrapper to the LZMA C SDK
++
+ config JFFS2_RTIME
+ bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS
+ depends on JFFS2_FS
+--- a/fs/jffs2/Makefile
++++ b/fs/jffs2/Makefile
+@@ -19,4 +19,7 @@ jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rub
+ jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
+ jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
+ jffs2-$(CONFIG_JFFS2_LZO) += compr_lzo.o
++jffs2-$(CONFIG_JFFS2_LZMA) += compr_lzma.o
+ jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o
++
++CFLAGS_compr_lzma.o += -Iinclude/linux -Ilib/lzma
+--- a/fs/jffs2/compr.c
++++ b/fs/jffs2/compr.c
+@@ -378,6 +378,9 @@ int __init jffs2_compressors_init(void)
+ #ifdef CONFIG_JFFS2_LZO
+ jffs2_lzo_init();
+ #endif
++#ifdef CONFIG_JFFS2_LZMA
++ jffs2_lzma_init();
++#endif
+ /* Setting default compression mode */
+ #ifdef CONFIG_JFFS2_CMODE_NONE
+ jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
+@@ -401,6 +404,9 @@ int __init jffs2_compressors_init(void)
+ int jffs2_compressors_exit(void)
+ {
+ /* Unregistering compressors */
++#ifdef CONFIG_JFFS2_LZMA
++ jffs2_lzma_exit();
++#endif
+ #ifdef CONFIG_JFFS2_LZO
+ jffs2_lzo_exit();
+ #endif
+--- a/fs/jffs2/compr.h
++++ b/fs/jffs2/compr.h
+@@ -29,9 +29,9 @@
+ #define JFFS2_DYNRUBIN_PRIORITY 20
+ #define JFFS2_LZARI_PRIORITY 30
+ #define JFFS2_RTIME_PRIORITY 50
+-#define JFFS2_ZLIB_PRIORITY 60
+-#define JFFS2_LZO_PRIORITY 80
+-
++#define JFFS2_LZMA_PRIORITY 70
++#define JFFS2_ZLIB_PRIORITY 80
++#define JFFS2_LZO_PRIORITY 90
+
+ #define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
+ #define JFFS2_DYNRUBIN_DISABLED /* for decompression */
+@@ -101,5 +101,9 @@ void jffs2_zlib_exit(void);
+ int jffs2_lzo_init(void);
+ void jffs2_lzo_exit(void);
+ #endif
++#ifdef CONFIG_JFFS2_LZMA
++int jffs2_lzma_init(void);
++void jffs2_lzma_exit(void);
++#endif
+
+ #endif /* __JFFS2_COMPR_H__ */
+--- /dev/null
++++ b/fs/jffs2/compr_lzma.c
+@@ -0,0 +1,128 @@
++/*
++ * JFFS2 -- Journalling Flash File System, Version 2.
++ *
++ * For licensing information, see the file 'LICENCE' in this directory.
++ *
++ * JFFS2 wrapper to the LZMA C SDK
++ *
++ */
++
++#include <linux/lzma.h>
++#include "compr.h"
++
++#ifdef __KERNEL__
++ static DEFINE_MUTEX(deflate_mutex);
++#endif
++
++CLzmaEncHandle *p;
++Byte propsEncoded[LZMA_PROPS_SIZE];
++SizeT propsSize = sizeof(propsEncoded);
++
++STATIC void lzma_free_workspace(void)
++{
++ LzmaEnc_Destroy(p, &lzma_alloc, &lzma_alloc);
++}
++
++STATIC int INIT lzma_alloc_workspace(CLzmaEncProps *props)
++{
++ if ((p = (CLzmaEncHandle *)LzmaEnc_Create(&lzma_alloc)) == NULL)
++ {
++ PRINT_ERROR("Failed to allocate lzma deflate workspace\n");
++ return -ENOMEM;
++ }
++
++ if (LzmaEnc_SetProps(p, props) != SZ_OK)
++ {
++ lzma_free_workspace();
++ return -1;
++ }
++
++ if (LzmaEnc_WriteProperties(p, propsEncoded, &propsSize) != SZ_OK)
++ {
++ lzma_free_workspace();
++ return -1;
++ }
++
++ return 0;
++}
++
++STATIC int jffs2_lzma_compress(unsigned char *data_in, unsigned char *cpage_out,
++ uint32_t *sourcelen, uint32_t *dstlen)
++{
++ SizeT compress_size = (SizeT)(*dstlen);
++ int ret;
++
++ #ifdef __KERNEL__
++ mutex_lock(&deflate_mutex);
++ #endif
++
++ ret = LzmaEnc_MemEncode(p, cpage_out, &compress_size, data_in, *sourcelen,
++ 0, NULL, &lzma_alloc, &lzma_alloc);
++
++ #ifdef __KERNEL__
++ mutex_unlock(&deflate_mutex);
++ #endif
++
++ if (ret != SZ_OK)
++ return -1;
++
++ *dstlen = (uint32_t)compress_size;
++
++ return 0;
++}
++
++STATIC int jffs2_lzma_decompress(unsigned char *data_in, unsigned char *cpage_out,
++ uint32_t srclen, uint32_t destlen)
++{
++ int ret;
++ SizeT dl = (SizeT)destlen;
++ SizeT sl = (SizeT)srclen;
++ ELzmaStatus status;
++
++ ret = LzmaDecode(cpage_out, &dl, data_in, &sl, propsEncoded,
++ propsSize, LZMA_FINISH_ANY, &status, &lzma_alloc);
++
++ if (ret != SZ_OK || status == LZMA_STATUS_NOT_FINISHED || dl != (SizeT)destlen)
++ return -1;
++
++ return 0;
++}
++
++static struct jffs2_compressor jffs2_lzma_comp = {
++ .priority = JFFS2_LZMA_PRIORITY,
++ .name = "lzma",
++ .compr = JFFS2_COMPR_LZMA,
++ .compress = &jffs2_lzma_compress,
++ .decompress = &jffs2_lzma_decompress,
++ .disabled = 0,
++};
++
++int INIT jffs2_lzma_init(void)
++{
++ int ret;
++ CLzmaEncProps props;
++ LzmaEncProps_Init(&props);
++
++ props.dictSize = LZMA_BEST_DICT(0x2000);
++ props.level = LZMA_BEST_LEVEL;
++ props.lc = LZMA_BEST_LC;
++ props.lp = LZMA_BEST_LP;
++ props.pb = LZMA_BEST_PB;
++ props.fb = LZMA_BEST_FB;
++
++ ret = lzma_alloc_workspace(&props);
++ if (ret < 0)
++ return ret;
++
++ ret = jffs2_register_compressor(&jffs2_lzma_comp);
++ if (ret)
++ lzma_free_workspace();
++
++ return ret;
++}
++
++void jffs2_lzma_exit(void)
++{
++ jffs2_unregister_compressor(&jffs2_lzma_comp);
++ lzma_free_workspace();
++}
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -372,14 +372,41 @@ static int __init init_jffs2_fs(void)
+ BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
+ BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
+
+- pr_info("version 2.2."
++ pr_info("version 2.2"
+ #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ " (NAND)"
+ #endif
+ #ifdef CONFIG_JFFS2_SUMMARY
+- " (SUMMARY) "
++ " (SUMMARY)"
+ #endif
+- " © 2001-2006 Red Hat, Inc.\n");
++#ifdef CONFIG_JFFS2_ZLIB
++ " (ZLIB)"
++#endif
++#ifdef CONFIG_JFFS2_LZO
++ " (LZO)"
++#endif
++#ifdef CONFIG_JFFS2_LZMA
++ " (LZMA)"
++#endif
++#ifdef CONFIG_JFFS2_RTIME
++ " (RTIME)"
++#endif
++#ifdef CONFIG_JFFS2_RUBIN
++ " (RUBIN)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_NONE
++ " (CMODE_NONE)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_PRIORITY
++ " (CMODE_PRIORITY)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_SIZE
++ " (CMODE_SIZE)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
++ " (CMODE_FAVOURLZO)"
++#endif
++ " (c) 2001-2006 Red Hat, Inc.\n");
+
+ jffs2_inode_cachep = kmem_cache_create("jffs2_i",
+ sizeof(struct jffs2_inode_info),
+--- /dev/null
++++ b/include/linux/lzma.h
+@@ -0,0 +1,62 @@
++#ifndef __LZMA_H__
++#define __LZMA_H__
++
++#ifdef __KERNEL__
++ #include <linux/kernel.h>
++ #include <linux/sched.h>
++ #include <linux/slab.h>
++ #include <linux/vmalloc.h>
++ #include <linux/init.h>
++ #define LZMA_MALLOC vmalloc
++ #define LZMA_FREE vfree
++ #define PRINT_ERROR(msg) printk(KERN_WARNING #msg)
++ #define INIT __init
++ #define STATIC static
++#else
++ #include <stdint.h>
++ #include <stdlib.h>
++ #include <stdio.h>
++ #include <unistd.h>
++ #include <string.h>
++ #include <asm/types.h>
++ #include <errno.h>
++ #include <linux/jffs2.h>
++ #ifndef PAGE_SIZE
++ extern int page_size;
++ #define PAGE_SIZE page_size
++ #endif
++ #define LZMA_MALLOC malloc
++ #define LZMA_FREE free
++ #define PRINT_ERROR(msg) fprintf(stderr, msg)
++ #define INIT
++ #define STATIC
++#endif
++
++#include "lzma/LzmaDec.h"
++#include "lzma/LzmaEnc.h"
++
++#define LZMA_BEST_LEVEL (9)
++#define LZMA_BEST_LC (0)
++#define LZMA_BEST_LP (0)
++#define LZMA_BEST_PB (0)
++#define LZMA_BEST_FB (273)
++
++#define LZMA_BEST_DICT(n) (((int)((n) / 2)) * 2)
++
++static void *p_lzma_malloc(void *p, size_t size)
++{
++ if (size == 0)
++ return NULL;
++
++ return LZMA_MALLOC(size);
++}
++
++static void p_lzma_free(void *p, void *address)
++{
++ if (address != NULL)
++ LZMA_FREE(address);
++}
++
++static ISzAlloc lzma_alloc = {p_lzma_malloc, p_lzma_free};
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzFind.h
+@@ -0,0 +1,115 @@
++/* LzFind.h -- Match finder for LZ algorithms
++2009-04-22 : Igor Pavlov : Public domain */
++
++#ifndef __LZ_FIND_H
++#define __LZ_FIND_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++typedef UInt32 CLzRef;
++
++typedef struct _CMatchFinder
++{
++ Byte *buffer;
++ UInt32 pos;
++ UInt32 posLimit;
++ UInt32 streamPos;
++ UInt32 lenLimit;
++
++ UInt32 cyclicBufferPos;
++ UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
++
++ UInt32 matchMaxLen;
++ CLzRef *hash;
++ CLzRef *son;
++ UInt32 hashMask;
++ UInt32 cutValue;
++
++ Byte *bufferBase;
++ ISeqInStream *stream;
++ int streamEndWasReached;
++
++ UInt32 blockSize;
++ UInt32 keepSizeBefore;
++ UInt32 keepSizeAfter;
++
++ UInt32 numHashBytes;
++ int directInput;
++ size_t directInputRem;
++ int btMode;
++ int bigHash;
++ UInt32 historySize;
++ UInt32 fixedHashSize;
++ UInt32 hashSizeSum;
++ UInt32 numSons;
++ SRes result;
++ UInt32 crc[256];
++} CMatchFinder;
++
++#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer)
++#define Inline_MatchFinder_GetIndexByte(p, index) ((p)->buffer[(Int32)(index)])
++
++#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
++
++int MatchFinder_NeedMove(CMatchFinder *p);
++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p);
++void MatchFinder_MoveBlock(CMatchFinder *p);
++void MatchFinder_ReadIfRequired(CMatchFinder *p);
++
++void MatchFinder_Construct(CMatchFinder *p);
++
++/* Conditions:
++ historySize <= 3 GB
++ keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB
++*/
++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
++ ISzAlloc *alloc);
++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc);
++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems);
++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
++
++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
++ UInt32 *distances, UInt32 maxLen);
++
++/*
++Conditions:
++ Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func.
++ Mf_GetPointerToCurrentPos_Func's result must be used only before any other function
++*/
++
++typedef void (*Mf_Init_Func)(void *object);
++typedef Byte (*Mf_GetIndexByte_Func)(void *object, Int32 index);
++typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
++typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
++typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
++typedef void (*Mf_Skip_Func)(void *object, UInt32);
++
++typedef struct _IMatchFinder
++{
++ Mf_Init_Func Init;
++ Mf_GetIndexByte_Func GetIndexByte;
++ Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
++ Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
++ Mf_GetMatches_Func GetMatches;
++ Mf_Skip_Func Skip;
++} IMatchFinder;
++
++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
++
++void MatchFinder_Init(CMatchFinder *p);
++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzHash.h
+@@ -0,0 +1,54 @@
++/* LzHash.h -- HASH functions for LZ algorithms
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZ_HASH_H
++#define __LZ_HASH_H
++
++#define kHash2Size (1 << 10)
++#define kHash3Size (1 << 16)
++#define kHash4Size (1 << 20)
++
++#define kFix3HashSize (kHash2Size)
++#define kFix4HashSize (kHash2Size + kHash3Size)
++#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
++
++#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8);
++
++#define HASH3_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }
++
++#define HASH4_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & p->hashMask; }
++
++#define HASH5_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \
++ hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & p->hashMask; \
++ hash4Value &= (kHash4Size - 1); }
++
++/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */
++#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
++
++
++#define MT_HASH2_CALC \
++ hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1);
++
++#define MT_HASH3_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
++
++#define MT_HASH4_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); }
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzmaDec.h
+@@ -0,0 +1,231 @@
++/* LzmaDec.h -- LZMA Decoder
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZMA_DEC_H
++#define __LZMA_DEC_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* #define _LZMA_PROB32 */
++/* _LZMA_PROB32 can increase the speed on some CPUs,
++ but memory usage for CLzmaDec::probs will be doubled in that case */
++
++#ifdef _LZMA_PROB32
++#define CLzmaProb UInt32
++#else
++#define CLzmaProb UInt16
++#endif
++
++
++/* ---------- LZMA Properties ---------- */
++
++#define LZMA_PROPS_SIZE 5
++
++typedef struct _CLzmaProps
++{
++ unsigned lc, lp, pb;
++ UInt32 dicSize;
++} CLzmaProps;
++
++/* LzmaProps_Decode - decodes properties
++Returns:
++ SZ_OK
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++*/
++
++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size);
++
++
++/* ---------- LZMA Decoder state ---------- */
++
++/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case.
++ Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */
++
++#define LZMA_REQUIRED_INPUT_MAX 20
++
++typedef struct
++{
++ CLzmaProps prop;
++ CLzmaProb *probs;
++ Byte *dic;
++ const Byte *buf;
++ UInt32 range, code;
++ SizeT dicPos;
++ SizeT dicBufSize;
++ UInt32 processedPos;
++ UInt32 checkDicSize;
++ unsigned state;
++ UInt32 reps[4];
++ unsigned remainLen;
++ int needFlush;
++ int needInitState;
++ UInt32 numProbs;
++ unsigned tempBufSize;
++ Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
++} CLzmaDec;
++
++#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; }
++
++void LzmaDec_Init(CLzmaDec *p);
++
++/* There are two types of LZMA streams:
++ 0) Stream with end mark. That end mark adds about 6 bytes to compressed size.
++ 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */
++
++typedef enum
++{
++ LZMA_FINISH_ANY, /* finish at any point */
++ LZMA_FINISH_END /* block must be finished at the end */
++} ELzmaFinishMode;
++
++/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!!
++
++ You must use LZMA_FINISH_END, when you know that current output buffer
++ covers last bytes of block. In other cases you must use LZMA_FINISH_ANY.
++
++ If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK,
++ and output value of destLen will be less than output buffer size limit.
++ You can check status result also.
++
++ You can use multiple checks to test data integrity after full decompression:
++ 1) Check Result and "status" variable.
++ 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize.
++ 3) Check that output(srcLen) = compressedSize, if you know real compressedSize.
++ You must use correct finish mode in that case. */
++
++typedef enum
++{
++ LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */
++ LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */
++ LZMA_STATUS_NOT_FINISHED, /* stream was not finished */
++ LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */
++} ELzmaStatus;
++
++/* ELzmaStatus is used only as output value for function call */
++
++
++/* ---------- Interfaces ---------- */
++
++/* There are 3 levels of interfaces:
++ 1) Dictionary Interface
++ 2) Buffer Interface
++ 3) One Call Interface
++ You can select any of these interfaces, but don't mix functions from different
++ groups for same object. */
++
++
++/* There are two variants to allocate state for Dictionary Interface:
++ 1) LzmaDec_Allocate / LzmaDec_Free
++ 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs
++ You can use variant 2, if you set dictionary buffer manually.
++ For Buffer Interface you must always use variant 1.
++
++LzmaDec_Allocate* can return:
++ SZ_OK
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++*/
++
++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc);
++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc);
++
++SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc);
++void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc);
++
++/* ---------- Dictionary Interface ---------- */
++
++/* You can use it, if you want to eliminate the overhead for data copying from
++ dictionary to some other external buffer.
++ You must work with CLzmaDec variables directly in this interface.
++
++ STEPS:
++ LzmaDec_Constr()
++ LzmaDec_Allocate()
++ for (each new stream)
++ {
++ LzmaDec_Init()
++ while (it needs more decompression)
++ {
++ LzmaDec_DecodeToDic()
++ use data from CLzmaDec::dic and update CLzmaDec::dicPos
++ }
++ }
++ LzmaDec_Free()
++*/
++
++/* LzmaDec_DecodeToDic
++
++ The decoding to internal dictionary buffer (CLzmaDec::dic).
++ You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!!
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (dicLimit).
++ LZMA_FINISH_ANY - Decode just dicLimit bytes.
++ LZMA_FINISH_END - Stream must be finished after dicLimit.
++
++Returns:
++ SZ_OK
++ status:
++ LZMA_STATUS_FINISHED_WITH_MARK
++ LZMA_STATUS_NOT_FINISHED
++ LZMA_STATUS_NEEDS_MORE_INPUT
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
++ SZ_ERROR_DATA - Data error
++*/
++
++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit,
++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
++
++
++/* ---------- Buffer Interface ---------- */
++
++/* It's zlib-like interface.
++ See LzmaDec_DecodeToDic description for information about STEPS and return results,
++ but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need
++ to work with CLzmaDec variables manually.
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (*destLen).
++ LZMA_FINISH_ANY - Decode just destLen bytes.
++ LZMA_FINISH_END - Stream must be finished after (*destLen).
++*/
++
++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen,
++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
++
++
++/* ---------- One Call Interface ---------- */
++
++/* LzmaDecode
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (*destLen).
++ LZMA_FINISH_ANY - Decode just destLen bytes.
++ LZMA_FINISH_END - Stream must be finished after (*destLen).
++
++Returns:
++ SZ_OK
++ status:
++ LZMA_STATUS_FINISHED_WITH_MARK
++ LZMA_STATUS_NOT_FINISHED
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
++ SZ_ERROR_DATA - Data error
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
++*/
++
++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
++ ELzmaStatus *status, ISzAlloc *alloc);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzmaEnc.h
+@@ -0,0 +1,80 @@
++/* LzmaEnc.h -- LZMA Encoder
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZMA_ENC_H
++#define __LZMA_ENC_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define LZMA_PROPS_SIZE 5
++
++typedef struct _CLzmaEncProps
++{
++ int level; /* 0 <= level <= 9 */
++ UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
++ (1 << 12) <= dictSize <= (1 << 30) for 64-bit version
++ default = (1 << 24) */
++ int lc; /* 0 <= lc <= 8, default = 3 */
++ int lp; /* 0 <= lp <= 4, default = 0 */
++ int pb; /* 0 <= pb <= 4, default = 2 */
++ int algo; /* 0 - fast, 1 - normal, default = 1 */
++ int fb; /* 5 <= fb <= 273, default = 32 */
++ int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */
++ int numHashBytes; /* 2, 3 or 4, default = 4 */
++ UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */
++ unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
++ int numThreads; /* 1 or 2, default = 2 */
++} CLzmaEncProps;
++
++void LzmaEncProps_Init(CLzmaEncProps *p);
++void LzmaEncProps_Normalize(CLzmaEncProps *p);
++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2);
++
++
++/* ---------- CLzmaEncHandle Interface ---------- */
++
++/* LzmaEnc_* functions can return the following exit codes:
++Returns:
++ SZ_OK - OK
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_PARAM - Incorrect parameter in props
++ SZ_ERROR_WRITE - Write callback error.
++ SZ_ERROR_PROGRESS - the progress callback requested a break
++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
++*/
++
++typedef void * CLzmaEncHandle;
++
++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc);
++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig);
++SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
++SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
++SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++
++/* ---------- One Call Interface ---------- */
++
++/* LzmaEncode
++Return code:
++ SZ_OK - OK
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_PARAM - Incorrect parameter
++ SZ_ERROR_OUTPUT_EOF - output buffer overflow
++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
++*/
++
++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
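
The matching one-call compression path, again only as a hedged userspace sketch: the 1 MiB dictionary, the writeEndMark = 1 choice, the malloc-backed allocator and the helper name encode_buf are assumptions for illustration, not something this patch prescribes.

#include <stdlib.h>
#include "LzmaEnc.h"

static void *SzAlloc(void *p, size_t size) { (void)p; return malloc(size); }
static void SzFree(void *p, void *address) { (void)p; free(address); }
static ISzAlloc g_Alloc = { SzAlloc, SzFree };

/* Compress src into dest.  The encoded properties (lc/lp/pb, dictSize) are
   written to props so a decoder can later hand them to LzmaDecode(). */
static int encode_buf(Byte *dest, SizeT *destLen,
                      const Byte *src, SizeT srcLen,
                      Byte props[LZMA_PROPS_SIZE])
{
	CLzmaEncProps p;
	SizeT propsSize = LZMA_PROPS_SIZE;
	SRes res;

	LzmaEncProps_Init(&p);
	p.dictSize = 1 << 20;	/* 1 MiB dictionary -- illustrative */

	/* writeEndMark = 1: emit an end-of-stream marker so the decoder does
	   not need to know the uncompressed size in advance. */
	res = LzmaEncode(dest, destLen, src, srcLen, &p, props, &propsSize, 1,
	                 NULL, &g_Alloc, &g_Alloc);
	return (res == SZ_OK) ? 0 : -1;
}
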
+--- /dev/null
++++ b/include/linux/lzma/Types.h
+@@ -0,0 +1,226 @@
++/* Types.h -- Basic types
++2009-11-23 : Igor Pavlov : Public domain */
++
++#ifndef __7Z_TYPES_H
++#define __7Z_TYPES_H
++
++#include <stddef.h>
++
++#ifdef _WIN32
++#include <windows.h>
++#endif
++
++#ifndef EXTERN_C_BEGIN
++#ifdef __cplusplus
++#define EXTERN_C_BEGIN extern "C" {
++#define EXTERN_C_END }
++#else
++#define EXTERN_C_BEGIN
++#define EXTERN_C_END
++#endif
++#endif
++
++EXTERN_C_BEGIN
++
++#define SZ_OK 0
++
++#define SZ_ERROR_DATA 1
++#define SZ_ERROR_MEM 2
++#define SZ_ERROR_CRC 3
++#define SZ_ERROR_UNSUPPORTED 4
++#define SZ_ERROR_PARAM 5
++#define SZ_ERROR_INPUT_EOF 6
++#define SZ_ERROR_OUTPUT_EOF 7
++#define SZ_ERROR_READ 8
++#define SZ_ERROR_WRITE 9
++#define SZ_ERROR_PROGRESS 10
++#define SZ_ERROR_FAIL 11
++#define SZ_ERROR_THREAD 12
++
++#define SZ_ERROR_ARCHIVE 16
++#define SZ_ERROR_NO_ARCHIVE 17
++
++typedef int SRes;
++
++#ifdef _WIN32
++typedef DWORD WRes;
++#else
++typedef int WRes;
++#endif
++
++#ifndef RINOK
++#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }
++#endif
++
++typedef unsigned char Byte;
++typedef short Int16;
++typedef unsigned short UInt16;
++
++#ifdef _LZMA_UINT32_IS_ULONG
++typedef long Int32;
++typedef unsigned long UInt32;
++#else
++typedef int Int32;
++typedef unsigned int UInt32;
++#endif
++
++#ifdef _SZ_NO_INT_64
++
++/* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers.
++ NOTES: Some code will work incorrectly in that case! */
++
++typedef long Int64;
++typedef unsigned long UInt64;
++
++#else
++
++#if defined(_MSC_VER) || defined(__BORLANDC__)
++typedef __int64 Int64;
++typedef unsigned __int64 UInt64;
++#else
++typedef long long int Int64;
++typedef unsigned long long int UInt64;
++#endif
++
++#endif
++
++#ifdef _LZMA_NO_SYSTEM_SIZE_T
++typedef UInt32 SizeT;
++#else
++typedef size_t SizeT;
++#endif
++
++typedef int Bool;
++#define True 1
++#define False 0
++
++
++#ifdef _WIN32
++#define MY_STD_CALL __stdcall
++#else
++#define MY_STD_CALL
++#endif
++
++#ifdef _MSC_VER
++
++#if _MSC_VER >= 1300
++#define MY_NO_INLINE __declspec(noinline)
++#else
++#define MY_NO_INLINE
++#endif
++
++#define MY_CDECL __cdecl
++#define MY_FAST_CALL __fastcall
++
++#else
++
++#define MY_CDECL
++#define MY_FAST_CALL
++
++#endif
++
++
++/* The following interfaces take a pointer to the containing structure as their first parameter */
++
++typedef struct
++{
++ SRes (*Read)(void *p, void *buf, size_t *size);
++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
++ (output(*size) < input(*size)) is allowed */
++} ISeqInStream;
++
++/* it can return SZ_ERROR_INPUT_EOF */
++SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size);
++SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType);
++SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf);
++
++typedef struct
++{
++ size_t (*Write)(void *p, const void *buf, size_t size);
++ /* Returns: result - the number of actually written bytes.
++ (result < size) means error */
++} ISeqOutStream;
++
++typedef enum
++{
++ SZ_SEEK_SET = 0,
++ SZ_SEEK_CUR = 1,
++ SZ_SEEK_END = 2
++} ESzSeek;
++
++typedef struct
++{
++ SRes (*Read)(void *p, void *buf, size_t *size); /* same as ISeqInStream::Read */
++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
++} ISeekInStream;
++
++typedef struct
++{
++ SRes (*Look)(void *p, void **buf, size_t *size);
++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
++ (output(*size) > input(*size)) is not allowed
++ (output(*size) < input(*size)) is allowed */
++ SRes (*Skip)(void *p, size_t offset);
++ /* offset must be <= output(*size) of Look */
++
++ SRes (*Read)(void *p, void *buf, size_t *size);
++ /* reads directly (without buffer). It's same as ISeqInStream::Read */
++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
++} ILookInStream;
++
++SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size);
++SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset);
++
++/* reads via ILookInStream::Read */
++SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType);
++SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size);
++
++#define LookToRead_BUF_SIZE (1 << 14)
++
++typedef struct
++{
++ ILookInStream s;
++ ISeekInStream *realStream;
++ size_t pos;
++ size_t size;
++ Byte buf[LookToRead_BUF_SIZE];
++} CLookToRead;
++
++void LookToRead_CreateVTable(CLookToRead *p, int lookahead);
++void LookToRead_Init(CLookToRead *p);
++
++typedef struct
++{
++ ISeqInStream s;
++ ILookInStream *realStream;
++} CSecToLook;
++
++void SecToLook_CreateVTable(CSecToLook *p);
++
++typedef struct
++{
++ ISeqInStream s;
++ ILookInStream *realStream;
++} CSecToRead;
++
++void SecToRead_CreateVTable(CSecToRead *p);
++
++typedef struct
++{
++ SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize);
++ /* Returns: result. (result != SZ_OK) means break.
++ Value (UInt64)(Int64)-1 for size means unknown value. */
++} ICompressProgress;
++
++typedef struct
++{
++ void *(*Alloc)(void *p, size_t size);
++ void (*Free)(void *p, void *address); /* address can be 0 */
++} ISzAlloc;
++
++#define IAlloc_Alloc(p, size) (p)->Alloc((p), size)
++#define IAlloc_Free(p, a) (p)->Free((p), a)
++
++EXTERN_C_END
++
++#endif
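
Types.h above defines the callback interfaces (ISeqInStream, ISeqOutStream, ILookInStream, ICompressProgress, ISzAlloc) that the coder consumes; the convention is that the interface struct is embedded as the first member of a caller-defined wrapper, so a callback can cast its `void *p` argument back to the wrapper, exactly as CLookToRead and CSecToLook do. A hedged userspace sketch of an ISeqOutStream backed by stdio (the CFileOutStream type and its functions are hypothetical, not part of the patch):

#include <stdio.h>
#include "Types.h"

typedef struct
{
	ISeqOutStream s;	/* must stay first: callbacks receive &s and cast back */
	FILE *file;
} CFileOutStream;

static size_t FileOutStream_Write(void *pp, const void *buf, size_t size)
{
	CFileOutStream *p = (CFileOutStream *)pp;
	/* Per the ISeqOutStream contract, returning less than `size` signals an error. */
	return fwrite(buf, 1, size, p->file);
}

static void FileOutStream_Init(CFileOutStream *p, FILE *f)
{
	p->s.Write = FileOutStream_Write;
	p->file = f;
}

A CFileOutStream set up this way can be passed to LzmaEnc_Encode() via the address of its embedded `s` member.
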
+--- a/include/uapi/linux/jffs2.h
++++ b/include/uapi/linux/jffs2.h
+@@ -46,6 +46,7 @@
+ #define JFFS2_COMPR_DYNRUBIN 0x05
+ #define JFFS2_COMPR_ZLIB 0x06
+ #define JFFS2_COMPR_LZO 0x07
++#define JFFS2_COMPR_LZMA 0x08
+ /* Compatibility flags. */
+ #define JFFS2_COMPAT_MASK 0xc000 /* What do to if an unknown nodetype is found */
+ #define JFFS2_NODE_ACCURATE 0x2000
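
The new JFFS2_COMPR_LZMA value is the on-flash compressor ID that the jffs2-lzma glue (added elsewhere in this patch series) claims when it registers with the JFFS2 compressor framework. The sketch below only shows how such an ID is consumed; the callback bodies, the JFFS2_LZMA_PRIORITY constant and the exact registration code are hypothetical here and live in the actual fs/jffs2 patch.

#include <linux/jffs2.h>	/* JFFS2_COMPR_LZMA, added above */
#include "compr.h"		/* fs/jffs2/compr.h: struct jffs2_compressor */

static int jffs2_lzma_compress(unsigned char *data_in, unsigned char *cpage_out,
			       uint32_t *sourcelen, uint32_t *dstlen)
{
	/* would call into lib/lzma (LzmaEncode) here */
	return -1;	/* non-zero: leave this block uncompressed */
}

static int jffs2_lzma_decompress(unsigned char *data_in, unsigned char *cpage_out,
				 uint32_t srclen, uint32_t destlen)
{
	/* would call into lib/lzma (LzmaDecode) here */
	return 0;
}

static struct jffs2_compressor jffs2_lzma_comp = {
	.priority = JFFS2_LZMA_PRIORITY,	/* hypothetical priority constant */
	.name = "lzma",
	.compr = JFFS2_COMPR_LZMA,		/* the ID introduced by this hunk */
	.compress = &jffs2_lzma_compress,
	.decompress = &jffs2_lzma_decompress,
	.disabled = 0,
};

int jffs2_lzma_init(void)
{
	return jffs2_register_compressor(&jffs2_lzma_comp);
}
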
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -259,6 +259,12 @@ config ZSTD_DECOMPRESS
+
+ source "lib/xz/Kconfig"
+
++config LZMA_COMPRESS
++ tristate
++
++config LZMA_DECOMPRESS
++ tristate
++
+ #
+ # These all provide a common interface (hence the apparent duplication with
+ # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -3,6 +3,16 @@
+ # Makefile for some libs needed in the kernel.
+ #
+
++ifdef CONFIG_JFFS2_ZLIB
++ CONFIG_ZLIB_INFLATE:=y
++ CONFIG_ZLIB_DEFLATE:=y
++endif
++
++ifdef CONFIG_JFFS2_LZMA
++ CONFIG_LZMA_DECOMPRESS:=y
++ CONFIG_LZMA_COMPRESS:=y
++endif
++
+ ifdef CONFIG_FUNCTION_TRACER
+ ORIG_CFLAGS := $(KBUILD_CFLAGS)
+ KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
+@@ -122,6 +132,8 @@ obj-$(CONFIG_ZSTD_COMPRESS) += zstd/
+ obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/
+ obj-$(CONFIG_XZ_DEC) += xz/
+ obj-$(CONFIG_RAID6_PQ) += raid6/
++obj-$(CONFIG_LZMA_COMPRESS) += lzma/
++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma/
+
+ lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
+ lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
+--- /dev/null
++++ b/lib/lzma/LzFind.c
+@@ -0,0 +1,761 @@
++/* LzFind.c -- Match finder for LZ algorithms
++2009-04-22 : Igor Pavlov : Public domain */
++
++#include <string.h>
++
++#include "LzFind.h"
++#include "LzHash.h"
++
++#define kEmptyHashValue 0
++#define kMaxValForNormalize ((UInt32)0xFFFFFFFF)
++#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */
++#define kNormalizeMask (~(kNormalizeStepMin - 1))
++#define kMaxHistorySize ((UInt32)3 << 30)
++
++#define kStartMaxLen 3
++
++static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc)
++{
++ if (!p->directInput)
++ {
++ alloc->Free(alloc, p->bufferBase);
++ p->bufferBase = 0;
++ }
++}
++
++/* keepSizeBefore + keepSizeAfter + keepSizeReserv must be < 4G */
++
++static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc)
++{
++ UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv;
++ if (p->directInput)
++ {
++ p->blockSize = blockSize;
++ return 1;
++ }
++ if (p->bufferBase == 0 || p->blockSize != blockSize)
++ {
++ LzInWindow_Free(p, alloc);
++ p->blockSize = blockSize;
++ p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize);
++ }
++ return (p->bufferBase != 0);
++}
++
++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
++Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
++
++UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
++
++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
++{
++ p->posLimit -= subValue;
++ p->pos -= subValue;
++ p->streamPos -= subValue;
++}
++
++static void MatchFinder_ReadBlock(CMatchFinder *p)
++{
++ if (p->streamEndWasReached || p->result != SZ_OK)
++ return;
++ if (p->directInput)
++ {
++ UInt32 curSize = 0xFFFFFFFF - p->streamPos;
++ if (curSize > p->directInputRem)
++ curSize = (UInt32)p->directInputRem;
++ p->directInputRem -= curSize;
++ p->streamPos += curSize;
++ if (p->directInputRem == 0)
++ p->streamEndWasReached = 1;
++ return;
++ }
++ for (;;)
++ {
++ Byte *dest = p->buffer + (p->streamPos - p->pos);
++ size_t size = (p->bufferBase + p->blockSize - dest);
++ if (size == 0)
++ return;
++ p->result = p->stream->Read(p->stream, dest, &size);
++ if (p->result != SZ_OK)
++ return;
++ if (size == 0)
++ {
++ p->streamEndWasReached = 1;
++ return;
++ }
++ p->streamPos += (UInt32)size;
++ if (p->streamPos - p->pos > p->keepSizeAfter)
++ return;
++ }
++}
++
++void MatchFinder_MoveBlock(CMatchFinder *p)
++{
++ memmove(p->bufferBase,
++ p->buffer - p->keepSizeBefore,
++ (size_t)(p->streamPos - p->pos + p->keepSizeBefore));
++ p->buffer = p->bufferBase + p->keepSizeBefore;
++}
++
++int MatchFinder_NeedMove(CMatchFinder *p)
++{
++ if (p->directInput)
++ return 0;
++ /* if (p->streamEndWasReached) return 0; */
++ return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
++}
++
++void MatchFinder_ReadIfRequired(CMatchFinder *p)
++{
++ if (p->streamEndWasReached)
++ return;
++ if (p->keepSizeAfter >= p->streamPos - p->pos)
++ MatchFinder_ReadBlock(p);
++}
++
++static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p)
++{
++ if (MatchFinder_NeedMove(p))
++ MatchFinder_MoveBlock(p);
++ MatchFinder_ReadBlock(p);
++}
++
++static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
++{
++ p->cutValue = 32;
++ p->btMode = 1;
++ p->numHashBytes = 4;
++ p->bigHash = 0;
++}
++
++#define kCrcPoly 0xEDB88320
++
++void MatchFinder_Construct(CMatchFinder *p)
++{
++ UInt32 i;
++ p->bufferBase = 0;
++ p->directInput = 0;
++ p->hash = 0;
++ MatchFinder_SetDefaultSettings(p);
++
++ for (i = 0; i < 256; i++)
++ {
++ UInt32 r = i;
++ int j;
++ for (j = 0; j < 8; j++)
++ r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1));
++ p->crc[i] = r;
++ }
++}
++
++static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->hash);
++ p->hash = 0;
++}
++
++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc)
++{
++ MatchFinder_FreeThisClassMemory(p, alloc);
++ LzInWindow_Free(p, alloc);
++}
++
++static CLzRef* AllocRefs(UInt32 num, ISzAlloc *alloc)
++{
++ size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
++ if (sizeInBytes / sizeof(CLzRef) != num)
++ return 0;
++ return (CLzRef *)alloc->Alloc(alloc, sizeInBytes);
++}
++
++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
++ ISzAlloc *alloc)
++{
++ UInt32 sizeReserv;
++ if (historySize > kMaxHistorySize)
++ {
++ MatchFinder_Free(p, alloc);
++ return 0;
++ }
++ sizeReserv = historySize >> 1;
++ if (historySize > ((UInt32)2 << 30))
++ sizeReserv = historySize >> 2;
++ sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19);
++
++ p->keepSizeBefore = historySize + keepAddBufferBefore + 1;
++ p->keepSizeAfter = matchMaxLen + keepAddBufferAfter;
++ /* we need one additional byte, since we use MoveBlock after pos++ and before using the dictionary */
++ if (LzInWindow_Create(p, sizeReserv, alloc))
++ {
++ UInt32 newCyclicBufferSize = historySize + 1;
++ UInt32 hs;
++ p->matchMaxLen = matchMaxLen;
++ {
++ p->fixedHashSize = 0;
++ if (p->numHashBytes == 2)
++ hs = (1 << 16) - 1;
++ else
++ {
++ hs = historySize - 1;
++ hs |= (hs >> 1);
++ hs |= (hs >> 2);
++ hs |= (hs >> 4);
++ hs |= (hs >> 8);
++ hs >>= 1;
++ hs |= 0xFFFF; /* don't change it! It's required for Deflate */
++ if (hs > (1 << 24))
++ {
++ if (p->numHashBytes == 3)
++ hs = (1 << 24) - 1;
++ else
++ hs >>= 1;
++ }
++ }
++ p->hashMask = hs;
++ hs++;
++ if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size;
++ if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size;
++ if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size;
++ hs += p->fixedHashSize;
++ }
++
++ {
++ UInt32 prevSize = p->hashSizeSum + p->numSons;
++ UInt32 newSize;
++ p->historySize = historySize;
++ p->hashSizeSum = hs;
++ p->cyclicBufferSize = newCyclicBufferSize;
++ p->numSons = (p->btMode ? newCyclicBufferSize * 2 : newCyclicBufferSize);
++ newSize = p->hashSizeSum + p->numSons;
++ if (p->hash != 0 && prevSize == newSize)
++ return 1;
++ MatchFinder_FreeThisClassMemory(p, alloc);
++ p->hash = AllocRefs(newSize, alloc);
++ if (p->hash != 0)
++ {
++ p->son = p->hash + p->hashSizeSum;
++ return 1;
++ }
++ }
++ }
++ MatchFinder_Free(p, alloc);
++ return 0;
++}
++
++static void MatchFinder_SetLimits(CMatchFinder *p)
++{
++ UInt32 limit = kMaxValForNormalize - p->pos;
++ UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos;
++ if (limit2 < limit)
++ limit = limit2;
++ limit2 = p->streamPos - p->pos;
++ if (limit2 <= p->keepSizeAfter)
++ {
++ if (limit2 > 0)
++ limit2 = 1;
++ }
++ else
++ limit2 -= p->keepSizeAfter;
++ if (limit2 < limit)
++ limit = limit2;
++ {
++ UInt32 lenLimit = p->streamPos - p->pos;
++ if (lenLimit > p->matchMaxLen)
++ lenLimit = p->matchMaxLen;
++ p->lenLimit = lenLimit;
++ }
++ p->posLimit = p->pos + limit;
++}
++
++void MatchFinder_Init(CMatchFinder *p)
++{
++ UInt32 i;
++ for (i = 0; i < p->hashSizeSum; i++)
++ p->hash[i] = kEmptyHashValue;
++ p->cyclicBufferPos = 0;
++ p->buffer = p->bufferBase;
++ p->pos = p->streamPos = p->cyclicBufferSize;
++ p->result = SZ_OK;
++ p->streamEndWasReached = 0;
++ MatchFinder_ReadBlock(p);
++ MatchFinder_SetLimits(p);
++}
++
++static UInt32 MatchFinder_GetSubValue(CMatchFinder *p)
++{
++ return (p->pos - p->historySize - 1) & kNormalizeMask;
++}
++
++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
++{
++ UInt32 i;
++ for (i = 0; i < numItems; i++)
++ {
++ UInt32 value = items[i];
++ if (value <= subValue)
++ value = kEmptyHashValue;
++ else
++ value -= subValue;
++ items[i] = value;
++ }
++}
++
++static void MatchFinder_Normalize(CMatchFinder *p)
++{
++ UInt32 subValue = MatchFinder_GetSubValue(p);
++ MatchFinder_Normalize3(subValue, p->hash, p->hashSizeSum + p->numSons);
++ MatchFinder_ReduceOffsets(p, subValue);
++}
++
++static void MatchFinder_CheckLimits(CMatchFinder *p)
++{
++ if (p->pos == kMaxValForNormalize)
++ MatchFinder_Normalize(p);
++ if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos)
++ MatchFinder_CheckAndMoveAndRead(p);
++ if (p->cyclicBufferPos == p->cyclicBufferSize)
++ p->cyclicBufferPos = 0;
++ MatchFinder_SetLimits(p);
++}
++
++static UInt32 * Hc_GetMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
++ UInt32 *distances, UInt32 maxLen)
++{
++ son[_cyclicBufferPos] = curMatch;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ return distances;
++ {
++ const Byte *pb = cur - delta;
++ curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
++ if (pb[maxLen] == cur[maxLen] && *pb == *cur)
++ {
++ UInt32 len = 0;
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ if (maxLen < len)
++ {
++ *distances++ = maxLen = len;
++ *distances++ = delta - 1;
++ if (len == lenLimit)
++ return distances;
++ }
++ }
++ }
++ }
++}
++
++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
++ UInt32 *distances, UInt32 maxLen)
++{
++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
++ UInt32 len0 = 0, len1 = 0;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ {
++ *ptr0 = *ptr1 = kEmptyHashValue;
++ return distances;
++ }
++ {
++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
++ const Byte *pb = cur - delta;
++ UInt32 len = (len0 < len1 ? len0 : len1);
++ if (pb[len] == cur[len])
++ {
++ if (++len != lenLimit && pb[len] == cur[len])
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ if (maxLen < len)
++ {
++ *distances++ = maxLen = len;
++ *distances++ = delta - 1;
++ if (len == lenLimit)
++ {
++ *ptr1 = pair[0];
++ *ptr0 = pair[1];
++ return distances;
++ }
++ }
++ }
++ if (pb[len] < cur[len])
++ {
++ *ptr1 = curMatch;
++ ptr1 = pair + 1;
++ curMatch = *ptr1;
++ len1 = len;
++ }
++ else
++ {
++ *ptr0 = curMatch;
++ ptr0 = pair;
++ curMatch = *ptr0;
++ len0 = len;
++ }
++ }
++ }
++}
++
++static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
++{
++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
++ UInt32 len0 = 0, len1 = 0;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ {
++ *ptr0 = *ptr1 = kEmptyHashValue;
++ return;
++ }
++ {
++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
++ const Byte *pb = cur - delta;
++ UInt32 len = (len0 < len1 ? len0 : len1);
++ if (pb[len] == cur[len])
++ {
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ {
++ if (len == lenLimit)
++ {
++ *ptr1 = pair[0];
++ *ptr0 = pair[1];
++ return;
++ }
++ }
++ }
++ if (pb[len] < cur[len])
++ {
++ *ptr1 = curMatch;
++ ptr1 = pair + 1;
++ curMatch = *ptr1;
++ len1 = len;
++ }
++ else
++ {
++ *ptr0 = curMatch;
++ ptr0 = pair;
++ curMatch = *ptr0;
++ len0 = len;
++ }
++ }
++ }
++}
++
++#define MOVE_POS \
++ ++p->cyclicBufferPos; \
++ p->buffer++; \
++ if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p);
++
++#define MOVE_POS_RET MOVE_POS return offset;
++
++static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; }
++
++#define GET_MATCHES_HEADER2(minLen, ret_op) \
++ UInt32 lenLimit; UInt32 hashValue; const Byte *cur; UInt32 curMatch; \
++ lenLimit = p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
++ cur = p->buffer;
++
++#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0)
++#define SKIP_HEADER(minLen) GET_MATCHES_HEADER2(minLen, continue)
++
++#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
++
++#define GET_MATCHES_FOOTER(offset, maxLen) \
++ offset = (UInt32)(GetMatchesSpec1(lenLimit, curMatch, MF_PARAMS(p), \
++ distances + offset, maxLen) - distances); MOVE_POS_RET;
++
++#define SKIP_FOOTER \
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS;
++
++static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(2)
++ HASH2_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = 0;
++ GET_MATCHES_FOOTER(offset, 1)
++}
++
++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = 0;
++ GET_MATCHES_FOOTER(offset, 2)
++}
++
++static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, delta2, maxLen, offset;
++ GET_MATCHES_HEADER(3)
++
++ HASH3_CALC;
++
++ delta2 = p->pos - p->hash[hash2Value];
++ curMatch = p->hash[kFix3HashSize + hashValue];
++
++ p->hash[hash2Value] =
++ p->hash[kFix3HashSize + hashValue] = p->pos;
++
++
++ maxLen = 2;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[0] = maxLen;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ if (maxLen == lenLimit)
++ {
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
++ MOVE_POS_RET;
++ }
++ }
++ GET_MATCHES_FOOTER(offset, maxLen)
++}
++
++static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
++ GET_MATCHES_HEADER(4)
++
++ HASH4_CALC;
++
++ delta2 = p->pos - p->hash[ hash2Value];
++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
++ curMatch = p->hash[kFix4HashSize + hashValue];
++
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++
++ maxLen = 1;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ distances[0] = maxLen = 2;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ }
++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
++ {
++ maxLen = 3;
++ distances[offset + 1] = delta3 - 1;
++ offset += 2;
++ delta2 = delta3;
++ }
++ if (offset != 0)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[offset - 2] = maxLen;
++ if (maxLen == lenLimit)
++ {
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
++ MOVE_POS_RET;
++ }
++ }
++ if (maxLen < 3)
++ maxLen = 3;
++ GET_MATCHES_FOOTER(offset, maxLen)
++}
++
++static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
++ GET_MATCHES_HEADER(4)
++
++ HASH4_CALC;
++
++ delta2 = p->pos - p->hash[ hash2Value];
++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
++ curMatch = p->hash[kFix4HashSize + hashValue];
++
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++
++ maxLen = 1;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ distances[0] = maxLen = 2;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ }
++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
++ {
++ maxLen = 3;
++ distances[offset + 1] = delta3 - 1;
++ offset += 2;
++ delta2 = delta3;
++ }
++ if (offset != 0)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[offset - 2] = maxLen;
++ if (maxLen == lenLimit)
++ {
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS_RET;
++ }
++ }
++ if (maxLen < 3)
++ maxLen = 3;
++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
++ distances + offset, maxLen) - (distances));
++ MOVE_POS_RET
++}
++
++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
++ distances, 2) - (distances));
++ MOVE_POS_RET
++}
++
++static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(2)
++ HASH2_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value;
++ SKIP_HEADER(3)
++ HASH3_CALC;
++ curMatch = p->hash[kFix3HashSize + hashValue];
++ p->hash[hash2Value] =
++ p->hash[kFix3HashSize + hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value, hash3Value;
++ SKIP_HEADER(4)
++ HASH4_CALC;
++ curMatch = p->hash[kFix4HashSize + hashValue];
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] = p->pos;
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value, hash3Value;
++ SKIP_HEADER(4)
++ HASH4_CALC;
++ curMatch = p->hash[kFix4HashSize + hashValue];
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS
++ }
++ while (--num != 0);
++}
++
++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS
++ }
++ while (--num != 0);
++}
++
++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable)
++{
++ vTable->Init = (Mf_Init_Func)MatchFinder_Init;
++ vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinder_GetIndexByte;
++ vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
++ vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
++ if (!p->btMode)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip;
++ }
++ else if (p->numHashBytes == 2)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip;
++ }
++ else if (p->numHashBytes == 3)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip;
++ }
++ else
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
++ }
++}
+--- /dev/null
++++ b/lib/lzma/LzmaDec.c
+@@ -0,0 +1,999 @@
++/* LzmaDec.c -- LZMA Decoder
++2009-09-20 : Igor Pavlov : Public domain */
++
++#include "LzmaDec.h"
++
++#include <string.h>
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++
++#define RC_INIT_SIZE 5
++
++#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
++
++#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
++#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
++#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
++#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
++ { UPDATE_0(p); i = (i + i); A0; } else \
++ { UPDATE_1(p); i = (i + i) + 1; A1; }
++#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;)
++
++#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); }
++#define TREE_DECODE(probs, limit, i) \
++ { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
++
++/* #define _LZMA_SIZE_OPT */
++
++#ifdef _LZMA_SIZE_OPT
++#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
++#else
++#define TREE_6_DECODE(probs, i) \
++ { i = 1; \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ i -= 0x40; }
++#endif
++
++#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); }
++
++#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
++#define UPDATE_0_CHECK range = bound;
++#define UPDATE_1_CHECK range -= bound; code -= bound;
++#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
++ { UPDATE_0_CHECK; i = (i + i); A0; } else \
++ { UPDATE_1_CHECK; i = (i + i) + 1; A1; }
++#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
++#define TREE_DECODE_CHECK(probs, limit, i) \
++ { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
++
++
++#define kNumPosBitsMax 4
++#define kNumPosStatesMax (1 << kNumPosBitsMax)
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define LenChoice 0
++#define LenChoice2 (LenChoice + 1)
++#define LenLow (LenChoice2 + 1)
++#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
++#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
++#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
++
++
++#define kNumStates 12
++#define kNumLitStates 7
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#define kNumPosSlotBits 6
++#define kNumLenToPosStates 4
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++
++#define kMatchMinLen 2
++#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
++
++#define IsMatch 0
++#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
++#define IsRepG0 (IsRep + kNumStates)
++#define IsRepG1 (IsRepG0 + kNumStates)
++#define IsRepG2 (IsRepG1 + kNumStates)
++#define IsRep0Long (IsRepG2 + kNumStates)
++#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
++#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
++#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
++#define LenCoder (Align + kAlignTableSize)
++#define RepLenCoder (LenCoder + kNumLenProbs)
++#define Literal (RepLenCoder + kNumLenProbs)
++
++#define LZMA_BASE_SIZE 1846
++#define LZMA_LIT_SIZE 768
++
++#define LzmaProps_GetNumProbs(p) ((UInt32)LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))
++
++#if Literal != LZMA_BASE_SIZE
++StopCompilingDueBUG
++#endif
++
++#define LZMA_DIC_MIN (1 << 12)
++
++/* First LZMA-symbol is always decoded.
++And it decodes new LZMA-symbols while (buf < bufLimit), but "buf" is without last normalization
++Out:
++ Result:
++ SZ_OK - OK
++ SZ_ERROR_DATA - Error
++ p->remainLen:
++ < kMatchSpecLenStart : normal remain
++ = kMatchSpecLenStart : finished
++ = kMatchSpecLenStart + 1 : Flush marker
++ = kMatchSpecLenStart + 2 : State Init Marker
++*/
++
++static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
++{
++ CLzmaProb *probs = p->probs;
++
++ unsigned state = p->state;
++ UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];
++ unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;
++ unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1;
++ unsigned lc = p->prop.lc;
++
++ Byte *dic = p->dic;
++ SizeT dicBufSize = p->dicBufSize;
++ SizeT dicPos = p->dicPos;
++
++ UInt32 processedPos = p->processedPos;
++ UInt32 checkDicSize = p->checkDicSize;
++ unsigned len = 0;
++
++ const Byte *buf = p->buf;
++ UInt32 range = p->range;
++ UInt32 code = p->code;
++
++ do
++ {
++ CLzmaProb *prob;
++ UInt32 bound;
++ unsigned ttt;
++ unsigned posState = processedPos & pbMask;
++
++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0(prob)
++ {
++ unsigned symbol;
++ UPDATE_0(prob);
++ prob = probs + Literal;
++ if (checkDicSize != 0 || processedPos != 0)
++ prob += (LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) +
++ (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc))));
++
++ if (state < kNumLitStates)
++ {
++ state -= (state < 4) ? state : 3;
++ symbol = 1;
++ do { GET_BIT(prob + symbol, symbol) } while (symbol < 0x100);
++ }
++ else
++ {
++ unsigned matchByte = p->dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ unsigned offs = 0x100;
++ state -= (state < 10) ? 3 : 6;
++ symbol = 1;
++ do
++ {
++ unsigned bit;
++ CLzmaProb *probLit;
++ matchByte <<= 1;
++ bit = (matchByte & offs);
++ probLit = prob + offs + bit + symbol;
++ GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit)
++ }
++ while (symbol < 0x100);
++ }
++ dic[dicPos++] = (Byte)symbol;
++ processedPos++;
++ continue;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ prob = probs + IsRep + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ state += kNumStates;
++ prob = probs + LenCoder;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ if (checkDicSize == 0 && processedPos == 0)
++ return SZ_ERROR_DATA;
++ prob = probs + IsRepG0 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ dicPos++;
++ processedPos++;
++ state = state < kNumLitStates ? 9 : 11;
++ continue;
++ }
++ UPDATE_1(prob);
++ }
++ else
++ {
++ UInt32 distance;
++ UPDATE_1(prob);
++ prob = probs + IsRepG1 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ distance = rep1;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ prob = probs + IsRepG2 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ distance = rep2;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ distance = rep3;
++ rep3 = rep2;
++ }
++ rep2 = rep1;
++ }
++ rep1 = rep0;
++ rep0 = distance;
++ }
++ state = state < kNumLitStates ? 8 : 11;
++ prob = probs + RepLenCoder;
++ }
++ {
++ unsigned limit, offset;
++ CLzmaProb *probLen = prob + LenChoice;
++ IF_BIT_0(probLen)
++ {
++ UPDATE_0(probLen);
++ probLen = prob + LenLow + (posState << kLenNumLowBits);
++ offset = 0;
++ limit = (1 << kLenNumLowBits);
++ }
++ else
++ {
++ UPDATE_1(probLen);
++ probLen = prob + LenChoice2;
++ IF_BIT_0(probLen)
++ {
++ UPDATE_0(probLen);
++ probLen = prob + LenMid + (posState << kLenNumMidBits);
++ offset = kLenNumLowSymbols;
++ limit = (1 << kLenNumMidBits);
++ }
++ else
++ {
++ UPDATE_1(probLen);
++ probLen = prob + LenHigh;
++ offset = kLenNumLowSymbols + kLenNumMidSymbols;
++ limit = (1 << kLenNumHighBits);
++ }
++ }
++ TREE_DECODE(probLen, limit, len);
++ len += offset;
++ }
++
++ if (state >= kNumStates)
++ {
++ UInt32 distance;
++ prob = probs + PosSlot +
++ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
++ TREE_6_DECODE(prob, distance);
++ if (distance >= kStartPosModelIndex)
++ {
++ unsigned posSlot = (unsigned)distance;
++ int numDirectBits = (int)(((distance >> 1) - 1));
++ distance = (2 | (distance & 1));
++ if (posSlot < kEndPosModelIndex)
++ {
++ distance <<= numDirectBits;
++ prob = probs + SpecPos + distance - posSlot - 1;
++ {
++ UInt32 mask = 1;
++ unsigned i = 1;
++ do
++ {
++ GET_BIT2(prob + i, i, ; , distance |= mask);
++ mask <<= 1;
++ }
++ while (--numDirectBits != 0);
++ }
++ }
++ else
++ {
++ numDirectBits -= kNumAlignBits;
++ do
++ {
++ NORMALIZE
++ range >>= 1;
++
++ {
++ UInt32 t;
++ code -= range;
++ t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */
++ distance = (distance << 1) + (t + 1);
++ code += range & t;
++ }
++ /*
++ distance <<= 1;
++ if (code >= range)
++ {
++ code -= range;
++ distance |= 1;
++ }
++ */
++ }
++ while (--numDirectBits != 0);
++ prob = probs + Align;
++ distance <<= kNumAlignBits;
++ {
++ unsigned i = 1;
++ GET_BIT2(prob + i, i, ; , distance |= 1);
++ GET_BIT2(prob + i, i, ; , distance |= 2);
++ GET_BIT2(prob + i, i, ; , distance |= 4);
++ GET_BIT2(prob + i, i, ; , distance |= 8);
++ }
++ if (distance == (UInt32)0xFFFFFFFF)
++ {
++ len += kMatchSpecLenStart;
++ state -= kNumStates;
++ break;
++ }
++ }
++ }
++ rep3 = rep2;
++ rep2 = rep1;
++ rep1 = rep0;
++ rep0 = distance + 1;
++ if (checkDicSize == 0)
++ {
++ if (distance >= processedPos)
++ return SZ_ERROR_DATA;
++ }
++ else if (distance >= checkDicSize)
++ return SZ_ERROR_DATA;
++ state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
++ }
++
++ len += kMatchMinLen;
++
++ if (limit == dicPos)
++ return SZ_ERROR_DATA;
++ {
++ SizeT rem = limit - dicPos;
++ unsigned curLen = ((rem < len) ? (unsigned)rem : len);
++ SizeT pos = (dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0);
++
++ processedPos += curLen;
++
++ len -= curLen;
++ if (pos + curLen <= dicBufSize)
++ {
++ Byte *dest = dic + dicPos;
++ ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos;
++ const Byte *lim = dest + curLen;
++ dicPos += curLen;
++ do
++ *(dest) = (Byte)*(dest + src);
++ while (++dest != lim);
++ }
++ else
++ {
++ do
++ {
++ dic[dicPos++] = dic[pos];
++ if (++pos == dicBufSize)
++ pos = 0;
++ }
++ while (--curLen != 0);
++ }
++ }
++ }
++ }
++ while (dicPos < limit && buf < bufLimit);
++ NORMALIZE;
++ p->buf = buf;
++ p->range = range;
++ p->code = code;
++ p->remainLen = len;
++ p->dicPos = dicPos;
++ p->processedPos = processedPos;
++ p->reps[0] = rep0;
++ p->reps[1] = rep1;
++ p->reps[2] = rep2;
++ p->reps[3] = rep3;
++ p->state = state;
++
++ return SZ_OK;
++}
++
++static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
++{
++ if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart)
++ {
++ Byte *dic = p->dic;
++ SizeT dicPos = p->dicPos;
++ SizeT dicBufSize = p->dicBufSize;
++ unsigned len = p->remainLen;
++ UInt32 rep0 = p->reps[0];
++ if (limit - dicPos < len)
++ len = (unsigned)(limit - dicPos);
++
++ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len)
++ p->checkDicSize = p->prop.dicSize;
++
++ p->processedPos += len;
++ p->remainLen -= len;
++ while (len-- != 0)
++ {
++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ dicPos++;
++ }
++ p->dicPos = dicPos;
++ }
++}
++
++static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
++{
++ do
++ {
++ SizeT limit2 = limit;
++ if (p->checkDicSize == 0)
++ {
++ UInt32 rem = p->prop.dicSize - p->processedPos;
++ if (limit - p->dicPos > rem)
++ limit2 = p->dicPos + rem;
++ }
++ RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit));
++ if (p->processedPos >= p->prop.dicSize)
++ p->checkDicSize = p->prop.dicSize;
++ LzmaDec_WriteRem(p, limit);
++ }
++ while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart);
++
++ if (p->remainLen > kMatchSpecLenStart)
++ {
++ p->remainLen = kMatchSpecLenStart;
++ }
++ return 0;
++}
++
++typedef enum
++{
++ DUMMY_ERROR, /* unexpected end of input stream */
++ DUMMY_LIT,
++ DUMMY_MATCH,
++ DUMMY_REP
++} ELzmaDummy;
++
++static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize)
++{
++ UInt32 range = p->range;
++ UInt32 code = p->code;
++ const Byte *bufLimit = buf + inSize;
++ CLzmaProb *probs = p->probs;
++ unsigned state = p->state;
++ ELzmaDummy res;
++
++ {
++ CLzmaProb *prob;
++ UInt32 bound;
++ unsigned ttt;
++ unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1);
++
++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK
++
++ /* if (bufLimit - buf >= 7) return DUMMY_LIT; */
++
++ prob = probs + Literal;
++ if (p->checkDicSize != 0 || p->processedPos != 0)
++ prob += (LZMA_LIT_SIZE *
++ ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) +
++ (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc))));
++
++ if (state < kNumLitStates)
++ {
++ unsigned symbol = 1;
++ do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100);
++ }
++ else
++ {
++ unsigned matchByte = p->dic[p->dicPos - p->reps[0] +
++ ((p->dicPos < p->reps[0]) ? p->dicBufSize : 0)];
++ unsigned offs = 0x100;
++ unsigned symbol = 1;
++ do
++ {
++ unsigned bit;
++ CLzmaProb *probLit;
++ matchByte <<= 1;
++ bit = (matchByte & offs);
++ probLit = prob + offs + bit + symbol;
++ GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit)
++ }
++ while (symbol < 0x100);
++ }
++ res = DUMMY_LIT;
++ }
++ else
++ {
++ unsigned len;
++ UPDATE_1_CHECK;
++
++ prob = probs + IsRep + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ state = 0;
++ prob = probs + LenCoder;
++ res = DUMMY_MATCH;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ res = DUMMY_REP;
++ prob = probs + IsRepG0 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ NORMALIZE_CHECK;
++ return DUMMY_REP;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ }
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ prob = probs + IsRepG1 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ prob = probs + IsRepG2 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ }
++ }
++ }
++ state = kNumStates;
++ prob = probs + RepLenCoder;
++ }
++ {
++ unsigned limit, offset;
++ CLzmaProb *probLen = prob + LenChoice;
++ IF_BIT_0_CHECK(probLen)
++ {
++ UPDATE_0_CHECK;
++ probLen = prob + LenLow + (posState << kLenNumLowBits);
++ offset = 0;
++ limit = 1 << kLenNumLowBits;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ probLen = prob + LenChoice2;
++ IF_BIT_0_CHECK(probLen)
++ {
++ UPDATE_0_CHECK;
++ probLen = prob + LenMid + (posState << kLenNumMidBits);
++ offset = kLenNumLowSymbols;
++ limit = 1 << kLenNumMidBits;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ probLen = prob + LenHigh;
++ offset = kLenNumLowSymbols + kLenNumMidSymbols;
++ limit = 1 << kLenNumHighBits;
++ }
++ }
++ TREE_DECODE_CHECK(probLen, limit, len);
++ len += offset;
++ }
++
++ if (state < 4)
++ {
++ unsigned posSlot;
++ prob = probs + PosSlot +
++ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
++ kNumPosSlotBits);
++ TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot);
++ if (posSlot >= kStartPosModelIndex)
++ {
++ int numDirectBits = ((posSlot >> 1) - 1);
++
++ /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */
++
++ if (posSlot < kEndPosModelIndex)
++ {
++ prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1;
++ }
++ else
++ {
++ numDirectBits -= kNumAlignBits;
++ do
++ {
++ NORMALIZE_CHECK
++ range >>= 1;
++ code -= range & (((code - range) >> 31) - 1);
++ /* if (code >= range) code -= range; */
++ }
++ while (--numDirectBits != 0);
++ prob = probs + Align;
++ numDirectBits = kNumAlignBits;
++ }
++ {
++ unsigned i = 1;
++ do
++ {
++ GET_BIT_CHECK(prob + i, i);
++ }
++ while (--numDirectBits != 0);
++ }
++ }
++ }
++ }
++ }
++ NORMALIZE_CHECK;
++ return res;
++}
++
++
++static void LzmaDec_InitRc(CLzmaDec *p, const Byte *data)
++{
++ p->code = ((UInt32)data[1] << 24) | ((UInt32)data[2] << 16) | ((UInt32)data[3] << 8) | ((UInt32)data[4]);
++ p->range = 0xFFFFFFFF;
++ p->needFlush = 0;
++}
++
++void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
++{
++ p->needFlush = 1;
++ p->remainLen = 0;
++ p->tempBufSize = 0;
++
++ if (initDic)
++ {
++ p->processedPos = 0;
++ p->checkDicSize = 0;
++ p->needInitState = 1;
++ }
++ if (initState)
++ p->needInitState = 1;
++}
++
++void LzmaDec_Init(CLzmaDec *p)
++{
++ p->dicPos = 0;
++ LzmaDec_InitDicAndState(p, True, True);
++}
++
++static void LzmaDec_InitStateReal(CLzmaDec *p)
++{
++ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (p->prop.lc + p->prop.lp));
++ UInt32 i;
++ CLzmaProb *probs = p->probs;
++ for (i = 0; i < numProbs; i++)
++ probs[i] = kBitModelTotal >> 1;
++ p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;
++ p->state = 0;
++ p->needInitState = 0;
++}
++
++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
++ ELzmaFinishMode finishMode, ELzmaStatus *status)
++{
++ SizeT inSize = *srcLen;
++ (*srcLen) = 0;
++ LzmaDec_WriteRem(p, dicLimit);
++
++ *status = LZMA_STATUS_NOT_SPECIFIED;
++
++ while (p->remainLen != kMatchSpecLenStart)
++ {
++ int checkEndMarkNow;
++
++ if (p->needFlush != 0)
++ {
++ for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)
++ p->tempBuf[p->tempBufSize++] = *src++;
++ if (p->tempBufSize < RC_INIT_SIZE)
++ {
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (p->tempBuf[0] != 0)
++ return SZ_ERROR_DATA;
++
++ LzmaDec_InitRc(p, p->tempBuf);
++ p->tempBufSize = 0;
++ }
++
++ checkEndMarkNow = 0;
++ if (p->dicPos >= dicLimit)
++ {
++ if (p->remainLen == 0 && p->code == 0)
++ {
++ *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK;
++ return SZ_OK;
++ }
++ if (finishMode == LZMA_FINISH_ANY)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_OK;
++ }
++ if (p->remainLen != 0)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ checkEndMarkNow = 1;
++ }
++
++ if (p->needInitState)
++ LzmaDec_InitStateReal(p);
++
++ if (p->tempBufSize == 0)
++ {
++ SizeT processed;
++ const Byte *bufLimit;
++ if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
++ {
++ int dummyRes = LzmaDec_TryDummy(p, src, inSize);
++ if (dummyRes == DUMMY_ERROR)
++ {
++ memcpy(p->tempBuf, src, inSize);
++ p->tempBufSize = (unsigned)inSize;
++ (*srcLen) += inSize;
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ bufLimit = src;
++ }
++ else
++ bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX;
++ p->buf = src;
++ if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0)
++ return SZ_ERROR_DATA;
++ processed = (SizeT)(p->buf - src);
++ (*srcLen) += processed;
++ src += processed;
++ inSize -= processed;
++ }
++ else
++ {
++ unsigned rem = p->tempBufSize, lookAhead = 0;
++ while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize)
++ p->tempBuf[rem++] = src[lookAhead++];
++ p->tempBufSize = rem;
++ if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
++ {
++ int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, rem);
++ if (dummyRes == DUMMY_ERROR)
++ {
++ (*srcLen) += lookAhead;
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ }
++ p->buf = p->tempBuf;
++ if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0)
++ return SZ_ERROR_DATA;
++ lookAhead -= (rem - (unsigned)(p->buf - p->tempBuf));
++ (*srcLen) += lookAhead;
++ src += lookAhead;
++ inSize -= lookAhead;
++ p->tempBufSize = 0;
++ }
++ }
++ if (p->code == 0)
++ *status = LZMA_STATUS_FINISHED_WITH_MARK;
++ return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA;
++}
++
++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
++{
++ SizeT outSize = *destLen;
++ SizeT inSize = *srcLen;
++ *srcLen = *destLen = 0;
++ for (;;)
++ {
++ SizeT inSizeCur = inSize, outSizeCur, dicPos;
++ ELzmaFinishMode curFinishMode;
++ SRes res;
++ if (p->dicPos == p->dicBufSize)
++ p->dicPos = 0;
++ dicPos = p->dicPos;
++ if (outSize > p->dicBufSize - dicPos)
++ {
++ outSizeCur = p->dicBufSize;
++ curFinishMode = LZMA_FINISH_ANY;
++ }
++ else
++ {
++ outSizeCur = dicPos + outSize;
++ curFinishMode = finishMode;
++ }
++
++ res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status);
++ src += inSizeCur;
++ inSize -= inSizeCur;
++ *srcLen += inSizeCur;
++ outSizeCur = p->dicPos - dicPos;
++ memcpy(dest, p->dic + dicPos, outSizeCur);
++ dest += outSizeCur;
++ outSize -= outSizeCur;
++ *destLen += outSizeCur;
++ if (res != 0)
++ return res;
++ if (outSizeCur == 0 || outSize == 0)
++ return SZ_OK;
++ }
++}
++
++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->probs);
++ p->probs = 0;
++}
++
++static void LzmaDec_FreeDict(CLzmaDec *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->dic);
++ p->dic = 0;
++}
++
++void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)
++{
++ LzmaDec_FreeProbs(p, alloc);
++ LzmaDec_FreeDict(p, alloc);
++}
++
++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
++{
++ UInt32 dicSize;
++ Byte d;
++
++ if (size < LZMA_PROPS_SIZE)
++ return SZ_ERROR_UNSUPPORTED;
++ else
++ dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24);
++
++ if (dicSize < LZMA_DIC_MIN)
++ dicSize = LZMA_DIC_MIN;
++ p->dicSize = dicSize;
++
++ d = data[0];
++ if (d >= (9 * 5 * 5))
++ return SZ_ERROR_UNSUPPORTED;
++
++ p->lc = d % 9;
++ d /= 9;
++ p->pb = d / 5;
++ p->lp = d % 5;
++
++ return SZ_OK;
++}
++
++static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc)
++{
++ UInt32 numProbs = LzmaProps_GetNumProbs(propNew);
++ if (p->probs == 0 || numProbs != p->numProbs)
++ {
++ LzmaDec_FreeProbs(p, alloc);
++ p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb));
++ p->numProbs = numProbs;
++ if (p->probs == 0)
++ return SZ_ERROR_MEM;
++ }
++ return SZ_OK;
++}
++
++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++{
++ CLzmaProps propNew;
++ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
++ p->prop = propNew;
++ return SZ_OK;
++}
++
++SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++{
++ CLzmaProps propNew;
++ SizeT dicBufSize;
++ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
++ dicBufSize = propNew.dicSize;
++ if (p->dic == 0 || dicBufSize != p->dicBufSize)
++ {
++ LzmaDec_FreeDict(p, alloc);
++ p->dic = (Byte *)alloc->Alloc(alloc, dicBufSize);
++ if (p->dic == 0)
++ {
++ LzmaDec_FreeProbs(p, alloc);
++ return SZ_ERROR_MEM;
++ }
++ }
++ p->dicBufSize = dicBufSize;
++ p->prop = propNew;
++ return SZ_OK;
++}
++
++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
++ ELzmaStatus *status, ISzAlloc *alloc)
++{
++ CLzmaDec p;
++ SRes res;
++ SizeT inSize = *srcLen;
++ SizeT outSize = *destLen;
++ *srcLen = *destLen = 0;
++ if (inSize < RC_INIT_SIZE)
++ return SZ_ERROR_INPUT_EOF;
++
++ LzmaDec_Construct(&p);
++ res = LzmaDec_AllocateProbs(&p, propData, propSize, alloc);
++ if (res != 0)
++ return res;
++ p.dic = dest;
++ p.dicBufSize = outSize;
++
++ LzmaDec_Init(&p);
++
++ *srcLen = inSize;
++ res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
++
++ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
++ res = SZ_ERROR_INPUT_EOF;
++
++ (*destLen) = p.dicPos;
++ LzmaDec_FreeProbs(&p, alloc);
++ return res;
++}
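
LzmaDec_DecodeToBuf() implemented above is the zlib-style entry point for callers that cannot keep the whole input or output in memory. The usual chunked loop looks roughly like the sketch below; read_more() and write_out() are hypothetical I/O helpers supplied by the caller, and the 4 KiB buffers are arbitrary.

#include "LzmaDec.h"

/* Hypothetical caller-supplied I/O helpers (not part of the patch). */
extern SizeT read_more(Byte *buf, SizeT cap);	/* returns 0 at end of input */
extern void  write_out(const Byte *buf, SizeT len);

static SRes stream_decode(const Byte *props /* LZMA_PROPS_SIZE bytes */, ISzAlloc *alloc)
{
	CLzmaDec dec;
	Byte inBuf[1 << 12], outBuf[1 << 12];
	SizeT inPos = 0, inLen = 0;
	SRes res;

	LzmaDec_Construct(&dec);
	res = LzmaDec_Allocate(&dec, props, LZMA_PROPS_SIZE, alloc);
	if (res != SZ_OK)
		return res;
	LzmaDec_Init(&dec);

	for (;;)
	{
		ELzmaStatus status;
		SizeT srcLen, destLen = sizeof(outBuf);

		if (inPos == inLen)
		{
			inLen = read_more(inBuf, sizeof(inBuf));
			inPos = 0;
		}
		srcLen = inLen - inPos;
		/* On entry srcLen/destLen are capacities; on return they hold the
		   numbers of bytes consumed and produced. */
		res = LzmaDec_DecodeToBuf(&dec, outBuf, &destLen, inBuf + inPos, &srcLen,
					  (inLen == 0) ? LZMA_FINISH_END : LZMA_FINISH_ANY,
					  &status);
		inPos += srcLen;
		write_out(outBuf, destLen);
		if (res != SZ_OK || status == LZMA_STATUS_FINISHED_WITH_MARK)
			break;
		if (inLen == 0 && status == LZMA_STATUS_NEEDS_MORE_INPUT)
		{
			res = SZ_ERROR_INPUT_EOF;	/* truncated stream */
			break;
		}
	}
	LzmaDec_Free(&dec, alloc);
	return res;
}

For streams encoded without an end mark the caller has to track the expected output size itself; this sketch simply treats running out of input before a mark as SZ_ERROR_INPUT_EOF.
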
+--- /dev/null
++++ b/lib/lzma/LzmaEnc.c
+@@ -0,0 +1,2271 @@
++/* LzmaEnc.c -- LZMA Encoder
++2009-11-24 : Igor Pavlov : Public domain */
++
++#include <string.h>
++
++/* #define SHOW_STAT */
++/* #define SHOW_STAT2 */
++
++#if defined(SHOW_STAT) || defined(SHOW_STAT2)
++#include <stdio.h>
++#endif
++
++#include "LzmaEnc.h"
++
++/* disable MT */
++#define _7ZIP_ST
++
++#include "LzFind.h"
++#ifndef _7ZIP_ST
++#include "LzFindMt.h"
++#endif
++
++#ifdef SHOW_STAT
++static int ttt = 0;
++#endif
++
++#define kBlockSizeMax ((1 << LZMA_NUM_BLOCK_SIZE_BITS) - 1)
++
++#define kBlockSize (9 << 10)
++#define kUnpackBlockSize (1 << 18)
++#define kMatchArraySize (1 << 21)
++#define kMatchRecordMaxSize ((LZMA_MATCH_LEN_MAX * 2 + 3) * LZMA_MATCH_LEN_MAX)
++
++#define kNumMaxDirectBits (31)
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++#define kProbInitValue (kBitModelTotal >> 1)
++
++#define kNumMoveReducingBits 4
++#define kNumBitPriceShiftBits 4
++#define kBitPrice (1 << kNumBitPriceShiftBits)
++
++void LzmaEncProps_Init(CLzmaEncProps *p)
++{
++ p->level = 5;
++ p->dictSize = p->mc = 0;
++ p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
++ p->writeEndMark = 0;
++}
++
++void LzmaEncProps_Normalize(CLzmaEncProps *p)
++{
++ int level = p->level;
++ if (level < 0) level = 5;
++ p->level = level;
++ if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level == 6 ? (1 << 25) : (1 << 26)));
++ if (p->lc < 0) p->lc = 3;
++ if (p->lp < 0) p->lp = 0;
++ if (p->pb < 0) p->pb = 2;
++ if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
++ if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
++ if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
++ if (p->numHashBytes < 0) p->numHashBytes = 4;
++ if (p->mc == 0) p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1);
++ if (p->numThreads < 0)
++ p->numThreads =
++ #ifndef _7ZIP_ST
++ ((p->btMode && p->algo) ? 2 : 1);
++ #else
++ 1;
++ #endif
++}
++
++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
++{
++ CLzmaEncProps props = *props2;
++ LzmaEncProps_Normalize(&props);
++ return props.dictSize;
++}
++
++/* #define LZMA_LOG_BSR */
++/* Define it for Intel's CPU */
++
++
++#ifdef LZMA_LOG_BSR
++
++#define kDicLogSizeMaxCompress 30
++
++#define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); }
++
++UInt32 GetPosSlot1(UInt32 pos)
++{
++ UInt32 res;
++ BSR2_RET(pos, res);
++ return res;
++}
++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
++#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
++
++#else
++
++#define kNumLogBits (9 + (int)sizeof(size_t) / 2)
++#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
++
++void LzmaEnc_FastPosInit(Byte *g_FastPos)
++{
++ int c = 2, slotFast;
++ g_FastPos[0] = 0;
++ g_FastPos[1] = 1;
++
++ for (slotFast = 2; slotFast < kNumLogBits * 2; slotFast++)
++ {
++ UInt32 k = (1 << ((slotFast >> 1) - 1));
++ UInt32 j;
++ for (j = 0; j < k; j++, c++)
++ g_FastPos[c] = (Byte)slotFast;
++ }
++}
++
++#define BSR2_RET(pos, res) { UInt32 i = 6 + ((kNumLogBits - 1) & \
++ (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
++ res = p->g_FastPos[pos >> i] + (i * 2); }
++/*
++#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
++ p->g_FastPos[pos >> 6] + 12 : \
++ p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
++*/
++
++#define GetPosSlot1(pos) p->g_FastPos[pos]
++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
++#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos]; else BSR2_RET(pos, res); }
++
++#endif
++
++
++#define LZMA_NUM_REPS 4
++
++typedef unsigned CState;
++
++typedef struct
++{
++ UInt32 price;
++
++ CState state;
++ int prev1IsChar;
++ int prev2;
++
++ UInt32 posPrev2;
++ UInt32 backPrev2;
++
++ UInt32 posPrev;
++ UInt32 backPrev;
++ UInt32 backs[LZMA_NUM_REPS];
++} COptimal;
++
++#define kNumOpts (1 << 12)
++
++#define kNumLenToPosStates 4
++#define kNumPosSlotBits 6
++#define kDicLogSizeMin 0
++#define kDicLogSizeMax 32
++#define kDistTableSizeMax (kDicLogSizeMax * 2)
++
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++#define kAlignMask (kAlignTableSize - 1)
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumPosModels (kEndPosModelIndex - kStartPosModelIndex)
++
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#ifdef _LZMA_PROB32
++#define CLzmaProb UInt32
++#else
++#define CLzmaProb UInt16
++#endif
++
++#define LZMA_PB_MAX 4
++#define LZMA_LC_MAX 8
++#define LZMA_LP_MAX 4
++
++#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX)
++
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define kLenNumSymbolsTotal (kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
++
++#define LZMA_MATCH_LEN_MIN 2
++#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1)
++
++#define kNumStates 12
++
++typedef struct
++{
++ CLzmaProb choice;
++ CLzmaProb choice2;
++ CLzmaProb low[LZMA_NUM_PB_STATES_MAX << kLenNumLowBits];
++ CLzmaProb mid[LZMA_NUM_PB_STATES_MAX << kLenNumMidBits];
++ CLzmaProb high[kLenNumHighSymbols];
++} CLenEnc;
++
++typedef struct
++{
++ CLenEnc p;
++ UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal];
++ UInt32 tableSize;
++ UInt32 counters[LZMA_NUM_PB_STATES_MAX];
++} CLenPriceEnc;
++
++typedef struct
++{
++ UInt32 range;
++ Byte cache;
++ UInt64 low;
++ UInt64 cacheSize;
++ Byte *buf;
++ Byte *bufLim;
++ Byte *bufBase;
++ ISeqOutStream *outStream;
++ UInt64 processed;
++ SRes res;
++} CRangeEnc;
++
++typedef struct
++{
++ CLzmaProb *litProbs;
++
++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
++ CLzmaProb isRep[kNumStates];
++ CLzmaProb isRepG0[kNumStates];
++ CLzmaProb isRepG1[kNumStates];
++ CLzmaProb isRepG2[kNumStates];
++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
++
++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
++ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
++
++ CLenPriceEnc lenEnc;
++ CLenPriceEnc repLenEnc;
++
++ UInt32 reps[LZMA_NUM_REPS];
++ UInt32 state;
++} CSaveState;
++
++typedef struct
++{
++ IMatchFinder matchFinder;
++ void *matchFinderObj;
++
++ #ifndef _7ZIP_ST
++ Bool mtMode;
++ CMatchFinderMt matchFinderMt;
++ #endif
++
++ CMatchFinder matchFinderBase;
++
++ #ifndef _7ZIP_ST
++ Byte pad[128];
++ #endif
++
++ UInt32 optimumEndIndex;
++ UInt32 optimumCurrentIndex;
++
++ UInt32 longestMatchLength;
++ UInt32 numPairs;
++ UInt32 numAvail;
++ COptimal opt[kNumOpts];
++
++ #ifndef LZMA_LOG_BSR
++ Byte g_FastPos[1 << kNumLogBits];
++ #endif
++
++ UInt32 ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
++ UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1];
++ UInt32 numFastBytes;
++ UInt32 additionalOffset;
++ UInt32 reps[LZMA_NUM_REPS];
++ UInt32 state;
++
++ UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
++ UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
++ UInt32 alignPrices[kAlignTableSize];
++ UInt32 alignPriceCount;
++
++ UInt32 distTableSize;
++
++ unsigned lc, lp, pb;
++ unsigned lpMask, pbMask;
++
++ CLzmaProb *litProbs;
++
++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
++ CLzmaProb isRep[kNumStates];
++ CLzmaProb isRepG0[kNumStates];
++ CLzmaProb isRepG1[kNumStates];
++ CLzmaProb isRepG2[kNumStates];
++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
++
++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
++ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
++
++ CLenPriceEnc lenEnc;
++ CLenPriceEnc repLenEnc;
++
++ unsigned lclp;
++
++ Bool fastMode;
++
++ CRangeEnc rc;
++
++ Bool writeEndMark;
++ UInt64 nowPos64;
++ UInt32 matchPriceCount;
++ Bool finished;
++ Bool multiThread;
++
++ SRes result;
++ UInt32 dictSize;
++ UInt32 matchFinderCycles;
++
++ int needInit;
++
++ CSaveState saveState;
++} CLzmaEnc;
++
++void LzmaEnc_SaveState(CLzmaEncHandle pp)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ CSaveState *dest = &p->saveState;
++ int i;
++ dest->lenEnc = p->lenEnc;
++ dest->repLenEnc = p->repLenEnc;
++ dest->state = p->state;
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
++ }
++ for (i = 0; i < kNumLenToPosStates; i++)
++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
++ memcpy(dest->reps, p->reps, sizeof(p->reps));
++ memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb));
++}
++
++void LzmaEnc_RestoreState(CLzmaEncHandle pp)
++{
++ CLzmaEnc *dest = (CLzmaEnc *)pp;
++ const CSaveState *p = &dest->saveState;
++ int i;
++ dest->lenEnc = p->lenEnc;
++ dest->repLenEnc = p->repLenEnc;
++ dest->state = p->state;
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
++ }
++ for (i = 0; i < kNumLenToPosStates; i++)
++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
++ memcpy(dest->reps, p->reps, sizeof(p->reps));
++ memcpy(dest->litProbs, p->litProbs, (0x300 << dest->lclp) * sizeof(CLzmaProb));
++}
++
++SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ CLzmaEncProps props = *props2;
++ LzmaEncProps_Normalize(&props);
++
++ if (props.lc > LZMA_LC_MAX || props.lp > LZMA_LP_MAX || props.pb > LZMA_PB_MAX ||
++ props.dictSize > (1 << kDicLogSizeMaxCompress) || props.dictSize > (1 << 30))
++ return SZ_ERROR_PARAM;
++ p->dictSize = props.dictSize;
++ p->matchFinderCycles = props.mc;
++ {
++ unsigned fb = props.fb;
++ if (fb < 5)
++ fb = 5;
++ if (fb > LZMA_MATCH_LEN_MAX)
++ fb = LZMA_MATCH_LEN_MAX;
++ p->numFastBytes = fb;
++ }
++ p->lc = props.lc;
++ p->lp = props.lp;
++ p->pb = props.pb;
++ p->fastMode = (props.algo == 0);
++ p->matchFinderBase.btMode = props.btMode;
++ {
++ UInt32 numHashBytes = 4;
++ if (props.btMode)
++ {
++ if (props.numHashBytes < 2)
++ numHashBytes = 2;
++ else if (props.numHashBytes < 4)
++ numHashBytes = props.numHashBytes;
++ }
++ p->matchFinderBase.numHashBytes = numHashBytes;
++ }
++
++ p->matchFinderBase.cutValue = props.mc;
++
++ p->writeEndMark = props.writeEndMark;
++
++ #ifndef _7ZIP_ST
++ /*
++ if (newMultiThread != _multiThread)
++ {
++ ReleaseMatchFinder();
++ _multiThread = newMultiThread;
++ }
++ */
++ p->multiThread = (props.numThreads > 1);
++ #endif
++
++ return SZ_OK;
++}
++
++static const int kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5};
++static const int kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
++static const int kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
++static const int kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
++
++#define IsCharState(s) ((s) < 7)
++
++#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1)
++
++#define kInfinityPrice (1 << 30)
++
++static void RangeEnc_Construct(CRangeEnc *p)
++{
++ p->outStream = 0;
++ p->bufBase = 0;
++}
++
++#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize)
++
++#define RC_BUF_SIZE (1 << 16)
++static int RangeEnc_Alloc(CRangeEnc *p, ISzAlloc *alloc)
++{
++ if (p->bufBase == 0)
++ {
++ p->bufBase = (Byte *)alloc->Alloc(alloc, RC_BUF_SIZE);
++ if (p->bufBase == 0)
++ return 0;
++ p->bufLim = p->bufBase + RC_BUF_SIZE;
++ }
++ return 1;
++}
++
++static void RangeEnc_Free(CRangeEnc *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->bufBase);
++ p->bufBase = 0;
++}
++
++static void RangeEnc_Init(CRangeEnc *p)
++{
++ /* Stream.Init(); */
++ p->low = 0;
++ p->range = 0xFFFFFFFF;
++ p->cacheSize = 1;
++ p->cache = 0;
++
++ p->buf = p->bufBase;
++
++ p->processed = 0;
++ p->res = SZ_OK;
++}
++
++static void RangeEnc_FlushStream(CRangeEnc *p)
++{
++ size_t num;
++ if (p->res != SZ_OK)
++ return;
++ num = p->buf - p->bufBase;
++ if (num != p->outStream->Write(p->outStream, p->bufBase, num))
++ p->res = SZ_ERROR_WRITE;
++ p->processed += num;
++ p->buf = p->bufBase;
++}
++
++static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p)
++{
++ if ((UInt32)p->low < (UInt32)0xFF000000 || (int)(p->low >> 32) != 0)
++ {
++ Byte temp = p->cache;
++ do
++ {
++ Byte *buf = p->buf;
++ *buf++ = (Byte)(temp + (Byte)(p->low >> 32));
++ p->buf = buf;
++ if (buf == p->bufLim)
++ RangeEnc_FlushStream(p);
++ temp = 0xFF;
++ }
++ while (--p->cacheSize != 0);
++ p->cache = (Byte)((UInt32)p->low >> 24);
++ }
++ p->cacheSize++;
++ p->low = (UInt32)p->low << 8;
++}
++
++static void RangeEnc_FlushData(CRangeEnc *p)
++{
++ int i;
++ for (i = 0; i < 5; i++)
++ RangeEnc_ShiftLow(p);
++}
++
++static void RangeEnc_EncodeDirectBits(CRangeEnc *p, UInt32 value, int numBits)
++{
++ do
++ {
++ p->range >>= 1;
++ p->low += p->range & (0 - ((value >> --numBits) & 1));
++ if (p->range < kTopValue)
++ {
++ p->range <<= 8;
++ RangeEnc_ShiftLow(p);
++ }
++ }
++ while (numBits != 0);
++}
++
++static void RangeEnc_EncodeBit(CRangeEnc *p, CLzmaProb *prob, UInt32 symbol)
++{
++ UInt32 ttt = *prob;
++ UInt32 newBound = (p->range >> kNumBitModelTotalBits) * ttt;
++ if (symbol == 0)
++ {
++ p->range = newBound;
++ ttt += (kBitModelTotal - ttt) >> kNumMoveBits;
++ }
++ else
++ {
++ p->low += newBound;
++ p->range -= newBound;
++ ttt -= ttt >> kNumMoveBits;
++ }
++ *prob = (CLzmaProb)ttt;
++ if (p->range < kTopValue)
++ {
++ p->range <<= 8;
++ RangeEnc_ShiftLow(p);
++ }
++}
++
++static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol)
++{
++ symbol |= 0x100;
++ do
++ {
++ RangeEnc_EncodeBit(p, probs + (symbol >> 8), (symbol >> 7) & 1);
++ symbol <<= 1;
++ }
++ while (symbol < 0x10000);
++}
++
++static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol, UInt32 matchByte)
++{
++ UInt32 offs = 0x100;
++ symbol |= 0x100;
++ do
++ {
++ matchByte <<= 1;
++ RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (symbol >> 8)), (symbol >> 7) & 1);
++ symbol <<= 1;
++ offs &= ~(matchByte ^ symbol);
++ }
++ while (symbol < 0x10000);
++}
++
++void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
++{
++ UInt32 i;
++ for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits))
++ {
++ const int kCyclesBits = kNumBitPriceShiftBits;
++ UInt32 w = i;
++ UInt32 bitCount = 0;
++ int j;
++ for (j = 0; j < kCyclesBits; j++)
++ {
++ w = w * w;
++ bitCount <<= 1;
++ while (w >= ((UInt32)1 << 16))
++ {
++ w >>= 1;
++ bitCount++;
++ }
++ }
++ ProbPrices[i >> kNumMoveReducingBits] = ((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount);
++ }
++}
++
++
++#define GET_PRICE(prob, symbol) \
++ p->ProbPrices[((prob) ^ (((-(int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
++
++#define GET_PRICEa(prob, symbol) \
++ ProbPrices[((prob) ^ ((-((int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
++
++#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
++#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
++
++#define GET_PRICE_0a(prob) ProbPrices[(prob) >> kNumMoveReducingBits]
++#define GET_PRICE_1a(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
++
++static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ symbol |= 0x100;
++ do
++ {
++ price += GET_PRICEa(probs[symbol >> 8], (symbol >> 7) & 1);
++ symbol <<= 1;
++ }
++ while (symbol < 0x10000);
++ return price;
++}
++
++static UInt32 LitEnc_GetPriceMatched(const CLzmaProb *probs, UInt32 symbol, UInt32 matchByte, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ UInt32 offs = 0x100;
++ symbol |= 0x100;
++ do
++ {
++ matchByte <<= 1;
++ price += GET_PRICEa(probs[offs + (matchByte & offs) + (symbol >> 8)], (symbol >> 7) & 1);
++ symbol <<= 1;
++ offs &= ~(matchByte ^ symbol);
++ }
++ while (symbol < 0x10000);
++ return price;
++}
++
++
++static void RcTree_Encode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
++{
++ UInt32 m = 1;
++ int i;
++ for (i = numBitLevels; i != 0;)
++ {
++ UInt32 bit;
++ i--;
++ bit = (symbol >> i) & 1;
++ RangeEnc_EncodeBit(rc, probs + m, bit);
++ m = (m << 1) | bit;
++ }
++}
++
++static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
++{
++ UInt32 m = 1;
++ int i;
++ for (i = 0; i < numBitLevels; i++)
++ {
++ UInt32 bit = symbol & 1;
++ RangeEnc_EncodeBit(rc, probs + m, bit);
++ m = (m << 1) | bit;
++ symbol >>= 1;
++ }
++}
++
++static UInt32 RcTree_GetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ symbol |= (1 << numBitLevels);
++ while (symbol != 1)
++ {
++ price += GET_PRICEa(probs[symbol >> 1], symbol & 1);
++ symbol >>= 1;
++ }
++ return price;
++}
++
++static UInt32 RcTree_ReverseGetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ UInt32 m = 1;
++ int i;
++ for (i = numBitLevels; i != 0; i--)
++ {
++ UInt32 bit = symbol & 1;
++ symbol >>= 1;
++ price += GET_PRICEa(probs[m], bit);
++ m = (m << 1) | bit;
++ }
++ return price;
++}
++
++
++static void LenEnc_Init(CLenEnc *p)
++{
++ unsigned i;
++ p->choice = p->choice2 = kProbInitValue;
++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumLowBits); i++)
++ p->low[i] = kProbInitValue;
++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumMidBits); i++)
++ p->mid[i] = kProbInitValue;
++ for (i = 0; i < kLenNumHighSymbols; i++)
++ p->high[i] = kProbInitValue;
++}
++
++static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState)
++{
++ if (symbol < kLenNumLowSymbols)
++ {
++ RangeEnc_EncodeBit(rc, &p->choice, 0);
++ RcTree_Encode(rc, p->low + (posState << kLenNumLowBits), kLenNumLowBits, symbol);
++ }
++ else
++ {
++ RangeEnc_EncodeBit(rc, &p->choice, 1);
++ if (symbol < kLenNumLowSymbols + kLenNumMidSymbols)
++ {
++ RangeEnc_EncodeBit(rc, &p->choice2, 0);
++ RcTree_Encode(rc, p->mid + (posState << kLenNumMidBits), kLenNumMidBits, symbol - kLenNumLowSymbols);
++ }
++ else
++ {
++ RangeEnc_EncodeBit(rc, &p->choice2, 1);
++ RcTree_Encode(rc, p->high, kLenNumHighBits, symbol - kLenNumLowSymbols - kLenNumMidSymbols);
++ }
++ }
++}
++
++static void LenEnc_SetPrices(CLenEnc *p, UInt32 posState, UInt32 numSymbols, UInt32 *prices, UInt32 *ProbPrices)
++{
++ UInt32 a0 = GET_PRICE_0a(p->choice);
++ UInt32 a1 = GET_PRICE_1a(p->choice);
++ UInt32 b0 = a1 + GET_PRICE_0a(p->choice2);
++ UInt32 b1 = a1 + GET_PRICE_1a(p->choice2);
++ UInt32 i = 0;
++ for (i = 0; i < kLenNumLowSymbols; i++)
++ {
++ if (i >= numSymbols)
++ return;
++ prices[i] = a0 + RcTree_GetPrice(p->low + (posState << kLenNumLowBits), kLenNumLowBits, i, ProbPrices);
++ }
++ for (; i < kLenNumLowSymbols + kLenNumMidSymbols; i++)
++ {
++ if (i >= numSymbols)
++ return;
++ prices[i] = b0 + RcTree_GetPrice(p->mid + (posState << kLenNumMidBits), kLenNumMidBits, i - kLenNumLowSymbols, ProbPrices);
++ }
++ for (; i < numSymbols; i++)
++ prices[i] = b1 + RcTree_GetPrice(p->high, kLenNumHighBits, i - kLenNumLowSymbols - kLenNumMidSymbols, ProbPrices);
++}
++
++static void MY_FAST_CALL LenPriceEnc_UpdateTable(CLenPriceEnc *p, UInt32 posState, UInt32 *ProbPrices)
++{
++ LenEnc_SetPrices(&p->p, posState, p->tableSize, p->prices[posState], ProbPrices);
++ p->counters[posState] = p->tableSize;
++}
++
++static void LenPriceEnc_UpdateTables(CLenPriceEnc *p, UInt32 numPosStates, UInt32 *ProbPrices)
++{
++ UInt32 posState;
++ for (posState = 0; posState < numPosStates; posState++)
++ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
++}
++
++static void LenEnc_Encode2(CLenPriceEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState, Bool updatePrice, UInt32 *ProbPrices)
++{
++ LenEnc_Encode(&p->p, rc, symbol, posState);
++ if (updatePrice)
++ if (--p->counters[posState] == 0)
++ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
++}
++
++
++
++
++static void MovePos(CLzmaEnc *p, UInt32 num)
++{
++ #ifdef SHOW_STAT
++ ttt += num;
++ printf("\n MovePos %d", num);
++ #endif
++ if (num != 0)
++ {
++ p->additionalOffset += num;
++ p->matchFinder.Skip(p->matchFinderObj, num);
++ }
++}
++
++static UInt32 ReadMatchDistances(CLzmaEnc *p, UInt32 *numDistancePairsRes)
++{
++ UInt32 lenRes = 0, numPairs;
++ p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
++ numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
++ #ifdef SHOW_STAT
++ printf("\n i = %d numPairs = %d ", ttt, numPairs / 2);
++ ttt++;
++ {
++ UInt32 i;
++ for (i = 0; i < numPairs; i += 2)
++ printf("%2d %6d | ", p->matches[i], p->matches[i + 1]);
++ }
++ #endif
++ if (numPairs > 0)
++ {
++ lenRes = p->matches[numPairs - 2];
++ if (lenRes == p->numFastBytes)
++ {
++ const Byte *pby = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ UInt32 distance = p->matches[numPairs - 1] + 1;
++ UInt32 numAvail = p->numAvail;
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++ {
++ const Byte *pby2 = pby - distance;
++ for (; lenRes < numAvail && pby[lenRes] == pby2[lenRes]; lenRes++);
++ }
++ }
++ }
++ p->additionalOffset++;
++ *numDistancePairsRes = numPairs;
++ return lenRes;
++}
++
++
++#define MakeAsChar(p) (p)->backPrev = (UInt32)(-1); (p)->prev1IsChar = False;
++#define MakeAsShortRep(p) (p)->backPrev = 0; (p)->prev1IsChar = False;
++#define IsShortRep(p) ((p)->backPrev == 0)
++
++static UInt32 GetRepLen1Price(CLzmaEnc *p, UInt32 state, UInt32 posState)
++{
++ return
++ GET_PRICE_0(p->isRepG0[state]) +
++ GET_PRICE_0(p->isRep0Long[state][posState]);
++}
++
++static UInt32 GetPureRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 state, UInt32 posState)
++{
++ UInt32 price;
++ if (repIndex == 0)
++ {
++ price = GET_PRICE_0(p->isRepG0[state]);
++ price += GET_PRICE_1(p->isRep0Long[state][posState]);
++ }
++ else
++ {
++ price = GET_PRICE_1(p->isRepG0[state]);
++ if (repIndex == 1)
++ price += GET_PRICE_0(p->isRepG1[state]);
++ else
++ {
++ price += GET_PRICE_1(p->isRepG1[state]);
++ price += GET_PRICE(p->isRepG2[state], repIndex - 2);
++ }
++ }
++ return price;
++}
++
++static UInt32 GetRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 len, UInt32 state, UInt32 posState)
++{
++ return p->repLenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN] +
++ GetPureRepPrice(p, repIndex, state, posState);
++}
++
++static UInt32 Backward(CLzmaEnc *p, UInt32 *backRes, UInt32 cur)
++{
++ UInt32 posMem = p->opt[cur].posPrev;
++ UInt32 backMem = p->opt[cur].backPrev;
++ p->optimumEndIndex = cur;
++ do
++ {
++ if (p->opt[cur].prev1IsChar)
++ {
++ MakeAsChar(&p->opt[posMem])
++ p->opt[posMem].posPrev = posMem - 1;
++ if (p->opt[cur].prev2)
++ {
++ p->opt[posMem - 1].prev1IsChar = False;
++ p->opt[posMem - 1].posPrev = p->opt[cur].posPrev2;
++ p->opt[posMem - 1].backPrev = p->opt[cur].backPrev2;
++ }
++ }
++ {
++ UInt32 posPrev = posMem;
++ UInt32 backCur = backMem;
++
++ backMem = p->opt[posPrev].backPrev;
++ posMem = p->opt[posPrev].posPrev;
++
++ p->opt[posPrev].backPrev = backCur;
++ p->opt[posPrev].posPrev = cur;
++ cur = posPrev;
++ }
++ }
++ while (cur != 0);
++ *backRes = p->opt[0].backPrev;
++ p->optimumCurrentIndex = p->opt[0].posPrev;
++ return p->optimumCurrentIndex;
++}
++
++#define LIT_PROBS(pos, prevByte) (p->litProbs + ((((pos) & p->lpMask) << p->lc) + ((prevByte) >> (8 - p->lc))) * 0x300)
++
++static UInt32 GetOptimum(CLzmaEnc *p, UInt32 position, UInt32 *backRes)
++{
++ UInt32 numAvail, mainLen, numPairs, repMaxIndex, i, posState, lenEnd, len, cur;
++ UInt32 matchPrice, repMatchPrice, normalMatchPrice;
++ UInt32 reps[LZMA_NUM_REPS], repLens[LZMA_NUM_REPS];
++ UInt32 *matches;
++ const Byte *data;
++ Byte curByte, matchByte;
++ if (p->optimumEndIndex != p->optimumCurrentIndex)
++ {
++ const COptimal *opt = &p->opt[p->optimumCurrentIndex];
++ UInt32 lenRes = opt->posPrev - p->optimumCurrentIndex;
++ *backRes = opt->backPrev;
++ p->optimumCurrentIndex = opt->posPrev;
++ return lenRes;
++ }
++ p->optimumCurrentIndex = p->optimumEndIndex = 0;
++
++ if (p->additionalOffset == 0)
++ mainLen = ReadMatchDistances(p, &numPairs);
++ else
++ {
++ mainLen = p->longestMatchLength;
++ numPairs = p->numPairs;
++ }
++
++ numAvail = p->numAvail;
++ if (numAvail < 2)
++ {
++ *backRes = (UInt32)(-1);
++ return 1;
++ }
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ repMaxIndex = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 lenTest;
++ const Byte *data2;
++ reps[i] = p->reps[i];
++ data2 = data - (reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ {
++ repLens[i] = 0;
++ continue;
++ }
++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
++ repLens[i] = lenTest;
++ if (lenTest > repLens[repMaxIndex])
++ repMaxIndex = i;
++ }
++ if (repLens[repMaxIndex] >= p->numFastBytes)
++ {
++ UInt32 lenRes;
++ *backRes = repMaxIndex;
++ lenRes = repLens[repMaxIndex];
++ MovePos(p, lenRes - 1);
++ return lenRes;
++ }
++
++ matches = p->matches;
++ if (mainLen >= p->numFastBytes)
++ {
++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 1);
++ return mainLen;
++ }
++ curByte = *data;
++ matchByte = *(data - (reps[0] + 1));
++
++ if (mainLen < 2 && curByte != matchByte && repLens[repMaxIndex] < 2)
++ {
++ *backRes = (UInt32)-1;
++ return 1;
++ }
++
++ p->opt[0].state = (CState)p->state;
++
++ posState = (position & p->pbMask);
++
++ {
++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
++ p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) +
++ (!IsCharState(p->state) ?
++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
++ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
++ }
++
++ MakeAsChar(&p->opt[1]);
++
++ matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
++
++ if (matchByte == curByte)
++ {
++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, p->state, posState);
++ if (shortRepPrice < p->opt[1].price)
++ {
++ p->opt[1].price = shortRepPrice;
++ MakeAsShortRep(&p->opt[1]);
++ }
++ }
++ lenEnd = ((mainLen >= repLens[repMaxIndex]) ? mainLen : repLens[repMaxIndex]);
++
++ if (lenEnd < 2)
++ {
++ *backRes = p->opt[1].backPrev;
++ return 1;
++ }
++
++ p->opt[1].posPrev = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ p->opt[0].backs[i] = reps[i];
++
++ len = lenEnd;
++ do
++ p->opt[len--].price = kInfinityPrice;
++ while (len >= 2);
++
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 repLen = repLens[i];
++ UInt32 price;
++ if (repLen < 2)
++ continue;
++ price = repMatchPrice + GetPureRepPrice(p, i, p->state, posState);
++ do
++ {
++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][repLen - 2];
++ COptimal *opt = &p->opt[repLen];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = 0;
++ opt->backPrev = i;
++ opt->prev1IsChar = False;
++ }
++ }
++ while (--repLen >= 2);
++ }
++
++ normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]);
++
++ len = ((repLens[0] >= 2) ? repLens[0] + 1 : 2);
++ if (len <= mainLen)
++ {
++ UInt32 offs = 0;
++ while (len > matches[offs])
++ offs += 2;
++ for (; ; len++)
++ {
++ COptimal *opt;
++ UInt32 distance = matches[offs + 1];
++
++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN];
++ UInt32 lenToPosState = GetLenToPosState(len);
++ if (distance < kNumFullDistances)
++ curAndLenPrice += p->distancesPrices[lenToPosState][distance];
++ else
++ {
++ UInt32 slot;
++ GetPosSlot2(distance, slot);
++ curAndLenPrice += p->alignPrices[distance & kAlignMask] + p->posSlotPrices[lenToPosState][slot];
++ }
++ opt = &p->opt[len];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = 0;
++ opt->backPrev = distance + LZMA_NUM_REPS;
++ opt->prev1IsChar = False;
++ }
++ if (len == matches[offs])
++ {
++ offs += 2;
++ if (offs == numPairs)
++ break;
++ }
++ }
++ }
++
++ cur = 0;
++
++ #ifdef SHOW_STAT2
++ if (position >= 0)
++ {
++ unsigned i;
++ printf("\n pos = %4X", position);
++ for (i = cur; i <= lenEnd; i++)
++ printf("\nprice[%4X] = %d", position - cur + i, p->opt[i].price);
++ }
++ #endif
++
++ for (;;)
++ {
++ UInt32 numAvailFull, newLen, numPairs, posPrev, state, posState, startLen;
++ UInt32 curPrice, curAnd1Price, matchPrice, repMatchPrice;
++ Bool nextIsChar;
++ Byte curByte, matchByte;
++ const Byte *data;
++ COptimal *curOpt;
++ COptimal *nextOpt;
++
++ cur++;
++ if (cur == lenEnd)
++ return Backward(p, backRes, cur);
++
++ newLen = ReadMatchDistances(p, &numPairs);
++ if (newLen >= p->numFastBytes)
++ {
++ p->numPairs = numPairs;
++ p->longestMatchLength = newLen;
++ return Backward(p, backRes, cur);
++ }
++ position++;
++ curOpt = &p->opt[cur];
++ posPrev = curOpt->posPrev;
++ if (curOpt->prev1IsChar)
++ {
++ posPrev--;
++ if (curOpt->prev2)
++ {
++ state = p->opt[curOpt->posPrev2].state;
++ if (curOpt->backPrev2 < LZMA_NUM_REPS)
++ state = kRepNextStates[state];
++ else
++ state = kMatchNextStates[state];
++ }
++ else
++ state = p->opt[posPrev].state;
++ state = kLiteralNextStates[state];
++ }
++ else
++ state = p->opt[posPrev].state;
++ if (posPrev == cur - 1)
++ {
++ if (IsShortRep(curOpt))
++ state = kShortRepNextStates[state];
++ else
++ state = kLiteralNextStates[state];
++ }
++ else
++ {
++ UInt32 pos;
++ const COptimal *prevOpt;
++ if (curOpt->prev1IsChar && curOpt->prev2)
++ {
++ posPrev = curOpt->posPrev2;
++ pos = curOpt->backPrev2;
++ state = kRepNextStates[state];
++ }
++ else
++ {
++ pos = curOpt->backPrev;
++ if (pos < LZMA_NUM_REPS)
++ state = kRepNextStates[state];
++ else
++ state = kMatchNextStates[state];
++ }
++ prevOpt = &p->opt[posPrev];
++ if (pos < LZMA_NUM_REPS)
++ {
++ UInt32 i;
++ reps[0] = prevOpt->backs[pos];
++ for (i = 1; i <= pos; i++)
++ reps[i] = prevOpt->backs[i - 1];
++ for (; i < LZMA_NUM_REPS; i++)
++ reps[i] = prevOpt->backs[i];
++ }
++ else
++ {
++ UInt32 i;
++ reps[0] = (pos - LZMA_NUM_REPS);
++ for (i = 1; i < LZMA_NUM_REPS; i++)
++ reps[i] = prevOpt->backs[i - 1];
++ }
++ }
++ curOpt->state = (CState)state;
++
++ curOpt->backs[0] = reps[0];
++ curOpt->backs[1] = reps[1];
++ curOpt->backs[2] = reps[2];
++ curOpt->backs[3] = reps[3];
++
++ curPrice = curOpt->price;
++ nextIsChar = False;
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ curByte = *data;
++ matchByte = *(data - (reps[0] + 1));
++
++ posState = (position & p->pbMask);
++
++ curAnd1Price = curPrice + GET_PRICE_0(p->isMatch[state][posState]);
++ {
++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
++ curAnd1Price +=
++ (!IsCharState(state) ?
++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
++ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
++ }
++
++ nextOpt = &p->opt[cur + 1];
++
++ if (curAnd1Price < nextOpt->price)
++ {
++ nextOpt->price = curAnd1Price;
++ nextOpt->posPrev = cur;
++ MakeAsChar(nextOpt);
++ nextIsChar = True;
++ }
++
++ matchPrice = curPrice + GET_PRICE_1(p->isMatch[state][posState]);
++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]);
++
++ if (matchByte == curByte && !(nextOpt->posPrev < cur && nextOpt->backPrev == 0))
++ {
++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, state, posState);
++ if (shortRepPrice <= nextOpt->price)
++ {
++ nextOpt->price = shortRepPrice;
++ nextOpt->posPrev = cur;
++ MakeAsShortRep(nextOpt);
++ nextIsChar = True;
++ }
++ }
++ numAvailFull = p->numAvail;
++ {
++ UInt32 temp = kNumOpts - 1 - cur;
++ if (temp < numAvailFull)
++ numAvailFull = temp;
++ }
++
++ if (numAvailFull < 2)
++ continue;
++ numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes);
++
++ if (!nextIsChar && matchByte != curByte) /* speed optimization */
++ {
++ /* try Literal + rep0 */
++ UInt32 temp;
++ UInt32 lenTest2;
++ const Byte *data2 = data - (reps[0] + 1);
++ UInt32 limit = p->numFastBytes + 1;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++
++ for (temp = 1; temp < limit && data[temp] == data2[temp]; temp++);
++ lenTest2 = temp - 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kLiteralNextStates[state];
++ UInt32 posStateNext = (position + 1) & p->pbMask;
++ UInt32 nextRepMatchPrice = curAnd1Price +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ UInt32 offset = cur + 1 + lenTest2;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = False;
++ }
++ }
++ }
++ }
++
++ startLen = 2; /* speed optimization */
++ {
++ UInt32 repIndex;
++ for (repIndex = 0; repIndex < LZMA_NUM_REPS; repIndex++)
++ {
++ UInt32 lenTest;
++ UInt32 lenTestTemp;
++ UInt32 price;
++ const Byte *data2 = data - (reps[repIndex] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
++ while (lenEnd < cur + lenTest)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ lenTestTemp = lenTest;
++ price = repMatchPrice + GetPureRepPrice(p, repIndex, state, posState);
++ do
++ {
++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][lenTest - 2];
++ COptimal *opt = &p->opt[cur + lenTest];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur;
++ opt->backPrev = repIndex;
++ opt->prev1IsChar = False;
++ }
++ }
++ while (--lenTest >= 2);
++ lenTest = lenTestTemp;
++
++ if (repIndex == 0)
++ startLen = lenTest + 1;
++
++ /* if (_maxMode) */
++ {
++ UInt32 lenTest2 = lenTest + 1;
++ UInt32 limit = lenTest2 + p->numFastBytes;
++ UInt32 nextRepMatchPrice;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
++ lenTest2 -= lenTest + 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kRepNextStates[state];
++ UInt32 posStateNext = (position + lenTest) & p->pbMask;
++ UInt32 curAndLenCharPrice =
++ price + p->repLenEnc.prices[posState][lenTest - 2] +
++ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
++ data[lenTest], data2[lenTest], p->ProbPrices);
++ state2 = kLiteralNextStates[state2];
++ posStateNext = (position + lenTest + 1) & p->pbMask;
++ nextRepMatchPrice = curAndLenCharPrice +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ UInt32 offset = cur + lenTest + 1 + lenTest2;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + lenTest + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = True;
++ opt->posPrev2 = cur;
++ opt->backPrev2 = repIndex;
++ }
++ }
++ }
++ }
++ }
++ }
++ /* for (UInt32 lenTest = 2; lenTest <= newLen; lenTest++) */
++ if (newLen > numAvail)
++ {
++ newLen = numAvail;
++ for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2);
++ matches[numPairs] = newLen;
++ numPairs += 2;
++ }
++ if (newLen >= startLen)
++ {
++ UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]);
++ UInt32 offs, curBack, posSlot;
++ UInt32 lenTest;
++ while (lenEnd < cur + newLen)
++ p->opt[++lenEnd].price = kInfinityPrice;
++
++ offs = 0;
++ while (startLen > matches[offs])
++ offs += 2;
++ curBack = matches[offs + 1];
++ GetPosSlot2(curBack, posSlot);
++ for (lenTest = /*2*/ startLen; ; lenTest++)
++ {
++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][lenTest - LZMA_MATCH_LEN_MIN];
++ UInt32 lenToPosState = GetLenToPosState(lenTest);
++ COptimal *opt;
++ if (curBack < kNumFullDistances)
++ curAndLenPrice += p->distancesPrices[lenToPosState][curBack];
++ else
++ curAndLenPrice += p->posSlotPrices[lenToPosState][posSlot] + p->alignPrices[curBack & kAlignMask];
++
++ opt = &p->opt[cur + lenTest];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur;
++ opt->backPrev = curBack + LZMA_NUM_REPS;
++ opt->prev1IsChar = False;
++ }
++
++ if (/*_maxMode && */lenTest == matches[offs])
++ {
++ /* Try Match + Literal + Rep0 */
++ const Byte *data2 = data - (curBack + 1);
++ UInt32 lenTest2 = lenTest + 1;
++ UInt32 limit = lenTest2 + p->numFastBytes;
++ UInt32 nextRepMatchPrice;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
++ lenTest2 -= lenTest + 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kMatchNextStates[state];
++ UInt32 posStateNext = (position + lenTest) & p->pbMask;
++ UInt32 curAndLenCharPrice = curAndLenPrice +
++ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
++ data[lenTest], data2[lenTest], p->ProbPrices);
++ state2 = kLiteralNextStates[state2];
++ posStateNext = (posStateNext + 1) & p->pbMask;
++ nextRepMatchPrice = curAndLenCharPrice +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 offset = cur + lenTest + 1 + lenTest2;
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + lenTest + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = True;
++ opt->posPrev2 = cur;
++ opt->backPrev2 = curBack + LZMA_NUM_REPS;
++ }
++ }
++ }
++ offs += 2;
++ if (offs == numPairs)
++ break;
++ curBack = matches[offs + 1];
++ if (curBack >= kNumFullDistances)
++ GetPosSlot2(curBack, posSlot);
++ }
++ }
++ }
++ }
++}
++
++#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))
++
++static UInt32 GetOptimumFast(CLzmaEnc *p, UInt32 *backRes)
++{
++ UInt32 numAvail, mainLen, mainDist, numPairs, repIndex, repLen, i;
++ const Byte *data;
++ const UInt32 *matches;
++
++ if (p->additionalOffset == 0)
++ mainLen = ReadMatchDistances(p, &numPairs);
++ else
++ {
++ mainLen = p->longestMatchLength;
++ numPairs = p->numPairs;
++ }
++
++ numAvail = p->numAvail;
++ *backRes = (UInt32)-1;
++ if (numAvail < 2)
++ return 1;
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++
++ repLen = repIndex = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 len;
++ const Byte *data2 = data - (p->reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ for (len = 2; len < numAvail && data[len] == data2[len]; len++);
++ if (len >= p->numFastBytes)
++ {
++ *backRes = i;
++ MovePos(p, len - 1);
++ return len;
++ }
++ if (len > repLen)
++ {
++ repIndex = i;
++ repLen = len;
++ }
++ }
++
++ matches = p->matches;
++ if (mainLen >= p->numFastBytes)
++ {
++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 1);
++ return mainLen;
++ }
++
++ mainDist = 0; /* for GCC */
++ if (mainLen >= 2)
++ {
++ mainDist = matches[numPairs - 1];
++ while (numPairs > 2 && mainLen == matches[numPairs - 4] + 1)
++ {
++ if (!ChangePair(matches[numPairs - 3], mainDist))
++ break;
++ numPairs -= 2;
++ mainLen = matches[numPairs - 2];
++ mainDist = matches[numPairs - 1];
++ }
++ if (mainLen == 2 && mainDist >= 0x80)
++ mainLen = 1;
++ }
++
++ if (repLen >= 2 && (
++ (repLen + 1 >= mainLen) ||
++ (repLen + 2 >= mainLen && mainDist >= (1 << 9)) ||
++ (repLen + 3 >= mainLen && mainDist >= (1 << 15))))
++ {
++ *backRes = repIndex;
++ MovePos(p, repLen - 1);
++ return repLen;
++ }
++
++ if (mainLen < 2 || numAvail <= 2)
++ return 1;
++
++ p->longestMatchLength = ReadMatchDistances(p, &p->numPairs);
++ if (p->longestMatchLength >= 2)
++ {
++ UInt32 newDistance = matches[p->numPairs - 1];
++ if ((p->longestMatchLength >= mainLen && newDistance < mainDist) ||
++ (p->longestMatchLength == mainLen + 1 && !ChangePair(mainDist, newDistance)) ||
++ (p->longestMatchLength > mainLen + 1) ||
++ (p->longestMatchLength + 1 >= mainLen && mainLen >= 3 && ChangePair(newDistance, mainDist)))
++ return 1;
++ }
++
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 len, limit;
++ const Byte *data2 = data - (p->reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ limit = mainLen - 1;
++ for (len = 2; len < limit && data[len] == data2[len]; len++);
++ if (len >= limit)
++ return 1;
++ }
++ *backRes = mainDist + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 2);
++ return mainLen;
++}
++
++static void WriteEndMarker(CLzmaEnc *p, UInt32 posState)
++{
++ UInt32 len;
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
++ p->state = kMatchNextStates[p->state];
++ len = LZMA_MATCH_LEN_MIN;
++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, (1 << kNumPosSlotBits) - 1);
++ RangeEnc_EncodeDirectBits(&p->rc, (((UInt32)1 << 30) - 1) >> kNumAlignBits, 30 - kNumAlignBits);
++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask);
++}
++
++static SRes CheckErrors(CLzmaEnc *p)
++{
++ if (p->result != SZ_OK)
++ return p->result;
++ if (p->rc.res != SZ_OK)
++ p->result = SZ_ERROR_WRITE;
++ if (p->matchFinderBase.result != SZ_OK)
++ p->result = SZ_ERROR_READ;
++ if (p->result != SZ_OK)
++ p->finished = True;
++ return p->result;
++}
++
++static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
++{
++ /* ReleaseMFStream(); */
++ p->finished = True;
++ if (p->writeEndMark)
++ WriteEndMarker(p, nowPos & p->pbMask);
++ RangeEnc_FlushData(&p->rc);
++ RangeEnc_FlushStream(&p->rc);
++ return CheckErrors(p);
++}
++
++static void FillAlignPrices(CLzmaEnc *p)
++{
++ UInt32 i;
++ for (i = 0; i < kAlignTableSize; i++)
++ p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices);
++ p->alignPriceCount = 0;
++}
++
++static void FillDistancesPrices(CLzmaEnc *p)
++{
++ UInt32 tempPrices[kNumFullDistances];
++ UInt32 i, lenToPosState;
++ for (i = kStartPosModelIndex; i < kNumFullDistances; i++)
++ {
++ UInt32 posSlot = GetPosSlot1(i);
++ UInt32 footerBits = ((posSlot >> 1) - 1);
++ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
++ tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base - posSlot - 1, footerBits, i - base, p->ProbPrices);
++ }
++
++ for (lenToPosState = 0; lenToPosState < kNumLenToPosStates; lenToPosState++)
++ {
++ UInt32 posSlot;
++ const CLzmaProb *encoder = p->posSlotEncoder[lenToPosState];
++ UInt32 *posSlotPrices = p->posSlotPrices[lenToPosState];
++ for (posSlot = 0; posSlot < p->distTableSize; posSlot++)
++ posSlotPrices[posSlot] = RcTree_GetPrice(encoder, kNumPosSlotBits, posSlot, p->ProbPrices);
++ for (posSlot = kEndPosModelIndex; posSlot < p->distTableSize; posSlot++)
++ posSlotPrices[posSlot] += ((((posSlot >> 1) - 1) - kNumAlignBits) << kNumBitPriceShiftBits);
++
++ {
++ UInt32 *distancesPrices = p->distancesPrices[lenToPosState];
++ UInt32 i;
++ for (i = 0; i < kStartPosModelIndex; i++)
++ distancesPrices[i] = posSlotPrices[i];
++ for (; i < kNumFullDistances; i++)
++ distancesPrices[i] = posSlotPrices[GetPosSlot1(i)] + tempPrices[i];
++ }
++ }
++ p->matchPriceCount = 0;
++}
++
++void LzmaEnc_Construct(CLzmaEnc *p)
++{
++ RangeEnc_Construct(&p->rc);
++ MatchFinder_Construct(&p->matchFinderBase);
++ #ifndef _7ZIP_ST
++ MatchFinderMt_Construct(&p->matchFinderMt);
++ p->matchFinderMt.MatchFinder = &p->matchFinderBase;
++ #endif
++
++ {
++ CLzmaEncProps props;
++ LzmaEncProps_Init(&props);
++ LzmaEnc_SetProps(p, &props);
++ }
++
++ #ifndef LZMA_LOG_BSR
++ LzmaEnc_FastPosInit(p->g_FastPos);
++ #endif
++
++ LzmaEnc_InitPriceTables(p->ProbPrices);
++ p->litProbs = 0;
++ p->saveState.litProbs = 0;
++}
++
++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc)
++{
++ void *p;
++ p = alloc->Alloc(alloc, sizeof(CLzmaEnc));
++ if (p != 0)
++ LzmaEnc_Construct((CLzmaEnc *)p);
++ return p;
++}
++
++void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->litProbs);
++ alloc->Free(alloc, p->saveState.litProbs);
++ p->litProbs = 0;
++ p->saveState.litProbs = 0;
++}
++
++void LzmaEnc_Destruct(CLzmaEnc *p, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ #ifndef _7ZIP_ST
++ MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
++ #endif
++ MatchFinder_Free(&p->matchFinderBase, allocBig);
++ LzmaEnc_FreeLits(p, alloc);
++ RangeEnc_Free(&p->rc, alloc);
++}
++
++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig);
++ alloc->Free(alloc, p);
++}
++
++static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, Bool useLimits, UInt32 maxPackSize, UInt32 maxUnpackSize)
++{
++ UInt32 nowPos32, startPos32;
++ if (p->needInit)
++ {
++ p->matchFinder.Init(p->matchFinderObj);
++ p->needInit = 0;
++ }
++
++ if (p->finished)
++ return p->result;
++ RINOK(CheckErrors(p));
++
++ nowPos32 = (UInt32)p->nowPos64;
++ startPos32 = nowPos32;
++
++ if (p->nowPos64 == 0)
++ {
++ UInt32 numPairs;
++ Byte curByte;
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
++ return Flush(p, nowPos32);
++ ReadMatchDistances(p, &numPairs);
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][0], 0);
++ p->state = kLiteralNextStates[p->state];
++ curByte = p->matchFinder.GetIndexByte(p->matchFinderObj, 0 - p->additionalOffset);
++ LitEnc_Encode(&p->rc, p->litProbs, curByte);
++ p->additionalOffset--;
++ nowPos32++;
++ }
++
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0)
++ for (;;)
++ {
++ UInt32 pos, len, posState;
++
++ if (p->fastMode)
++ len = GetOptimumFast(p, &pos);
++ else
++ len = GetOptimum(p, nowPos32, &pos);
++
++ #ifdef SHOW_STAT2
++ printf("\n pos = %4X, len = %d pos = %d", nowPos32, len, pos);
++ #endif
++
++ posState = nowPos32 & p->pbMask;
++ if (len == 1 && pos == (UInt32)-1)
++ {
++ Byte curByte;
++ CLzmaProb *probs;
++ const Byte *data;
++
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 0);
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
++ curByte = *data;
++ probs = LIT_PROBS(nowPos32, *(data - 1));
++ if (IsCharState(p->state))
++ LitEnc_Encode(&p->rc, probs, curByte);
++ else
++ LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0] - 1));
++ p->state = kLiteralNextStates[p->state];
++ }
++ else
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
++ if (pos < LZMA_NUM_REPS)
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 1);
++ if (pos == 0)
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 0);
++ RangeEnc_EncodeBit(&p->rc, &p->isRep0Long[p->state][posState], ((len == 1) ? 0 : 1));
++ }
++ else
++ {
++ UInt32 distance = p->reps[pos];
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 1);
++ if (pos == 1)
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 0);
++ else
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 1);
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG2[p->state], pos - 2);
++ if (pos == 3)
++ p->reps[3] = p->reps[2];
++ p->reps[2] = p->reps[1];
++ }
++ p->reps[1] = p->reps[0];
++ p->reps[0] = distance;
++ }
++ if (len == 1)
++ p->state = kShortRepNextStates[p->state];
++ else
++ {
++ LenEnc_Encode2(&p->repLenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ p->state = kRepNextStates[p->state];
++ }
++ }
++ else
++ {
++ UInt32 posSlot;
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
++ p->state = kMatchNextStates[p->state];
++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ pos -= LZMA_NUM_REPS;
++ GetPosSlot(pos, posSlot);
++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, posSlot);
++
++ if (posSlot >= kStartPosModelIndex)
++ {
++ UInt32 footerBits = ((posSlot >> 1) - 1);
++ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
++ UInt32 posReduced = pos - base;
++
++ if (posSlot < kEndPosModelIndex)
++ RcTree_ReverseEncode(&p->rc, p->posEncoders + base - posSlot - 1, footerBits, posReduced);
++ else
++ {
++ RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits);
++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask);
++ p->alignPriceCount++;
++ }
++ }
++ p->reps[3] = p->reps[2];
++ p->reps[2] = p->reps[1];
++ p->reps[1] = p->reps[0];
++ p->reps[0] = pos;
++ p->matchPriceCount++;
++ }
++ }
++ p->additionalOffset -= len;
++ nowPos32 += len;
++ if (p->additionalOffset == 0)
++ {
++ UInt32 processed;
++ if (!p->fastMode)
++ {
++ if (p->matchPriceCount >= (1 << 7))
++ FillDistancesPrices(p);
++ if (p->alignPriceCount >= kAlignTableSize)
++ FillAlignPrices(p);
++ }
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
++ break;
++ processed = nowPos32 - startPos32;
++ if (useLimits)
++ {
++ if (processed + kNumOpts + 300 >= maxUnpackSize ||
++ RangeEnc_GetProcessed(&p->rc) + kNumOpts * 2 >= maxPackSize)
++ break;
++ }
++ else if (processed >= (1 << 15))
++ {
++ p->nowPos64 += nowPos32 - startPos32;
++ return CheckErrors(p);
++ }
++ }
++ }
++ p->nowPos64 += nowPos32 - startPos32;
++ return Flush(p, nowPos32);
++}
++
++#define kBigHashDicLimit ((UInt32)1 << 24)
++
++static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ UInt32 beforeSize = kNumOpts;
++ Bool btMode;
++ if (!RangeEnc_Alloc(&p->rc, alloc))
++ return SZ_ERROR_MEM;
++ btMode = (p->matchFinderBase.btMode != 0);
++ #ifndef _7ZIP_ST
++ p->mtMode = (p->multiThread && !p->fastMode && btMode);
++ #endif
++
++ {
++ unsigned lclp = p->lc + p->lp;
++ if (p->litProbs == 0 || p->saveState.litProbs == 0 || p->lclp != lclp)
++ {
++ LzmaEnc_FreeLits(p, alloc);
++ p->litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
++ p->saveState.litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
++ if (p->litProbs == 0 || p->saveState.litProbs == 0)
++ {
++ LzmaEnc_FreeLits(p, alloc);
++ return SZ_ERROR_MEM;
++ }
++ p->lclp = lclp;
++ }
++ }
++
++ p->matchFinderBase.bigHash = (p->dictSize > kBigHashDicLimit);
++
++ if (beforeSize + p->dictSize < keepWindowSize)
++ beforeSize = keepWindowSize - p->dictSize;
++
++ #ifndef _7ZIP_ST
++ if (p->mtMode)
++ {
++ RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig));
++ p->matchFinderObj = &p->matchFinderMt;
++ MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
++ }
++ else
++ #endif
++ {
++ if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig))
++ return SZ_ERROR_MEM;
++ p->matchFinderObj = &p->matchFinderBase;
++ MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder);
++ }
++ return SZ_OK;
++}
++
++void LzmaEnc_Init(CLzmaEnc *p)
++{
++ UInt32 i;
++ p->state = 0;
++ for (i = 0 ; i < LZMA_NUM_REPS; i++)
++ p->reps[i] = 0;
++
++ RangeEnc_Init(&p->rc);
++
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ UInt32 j;
++ for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++)
++ {
++ p->isMatch[i][j] = kProbInitValue;
++ p->isRep0Long[i][j] = kProbInitValue;
++ }
++ p->isRep[i] = kProbInitValue;
++ p->isRepG0[i] = kProbInitValue;
++ p->isRepG1[i] = kProbInitValue;
++ p->isRepG2[i] = kProbInitValue;
++ }
++
++ {
++ UInt32 num = 0x300 << (p->lp + p->lc);
++ for (i = 0; i < num; i++)
++ p->litProbs[i] = kProbInitValue;
++ }
++
++ {
++ for (i = 0; i < kNumLenToPosStates; i++)
++ {
++ CLzmaProb *probs = p->posSlotEncoder[i];
++ UInt32 j;
++ for (j = 0; j < (1 << kNumPosSlotBits); j++)
++ probs[j] = kProbInitValue;
++ }
++ }
++ {
++ for (i = 0; i < kNumFullDistances - kEndPosModelIndex; i++)
++ p->posEncoders[i] = kProbInitValue;
++ }
++
++ LenEnc_Init(&p->lenEnc.p);
++ LenEnc_Init(&p->repLenEnc.p);
++
++ for (i = 0; i < (1 << kNumAlignBits); i++)
++ p->posAlignEncoder[i] = kProbInitValue;
++
++ p->optimumEndIndex = 0;
++ p->optimumCurrentIndex = 0;
++ p->additionalOffset = 0;
++
++ p->pbMask = (1 << p->pb) - 1;
++ p->lpMask = (1 << p->lp) - 1;
++}
++
++void LzmaEnc_InitPrices(CLzmaEnc *p)
++{
++ if (!p->fastMode)
++ {
++ FillDistancesPrices(p);
++ FillAlignPrices(p);
++ }
++
++ p->lenEnc.tableSize =
++ p->repLenEnc.tableSize =
++ p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN;
++ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, p->ProbPrices);
++ LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, p->ProbPrices);
++}
++
++static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ UInt32 i;
++ for (i = 0; i < (UInt32)kDicLogSizeMaxCompress; i++)
++ if (p->dictSize <= ((UInt32)1 << i))
++ break;
++ p->distTableSize = i * 2;
++
++ p->finished = False;
++ p->result = SZ_OK;
++ RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig));
++ LzmaEnc_Init(p);
++ LzmaEnc_InitPrices(p);
++ p->nowPos64 = 0;
++ return SZ_OK;
++}
++
++static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ p->matchFinderBase.stream = inStream;
++ p->needInit = 1;
++ p->rc.outStream = outStream;
++ return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
++}
++
++SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp,
++ ISeqInStream *inStream, UInt32 keepWindowSize,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ p->matchFinderBase.stream = inStream;
++ p->needInit = 1;
++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
++}
++
++static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
++{
++ p->matchFinderBase.directInput = 1;
++ p->matchFinderBase.bufferBase = (Byte *)src;
++ p->matchFinderBase.directInputRem = srcLen;
++}
++
++SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
++ UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ LzmaEnc_SetInputBuf(p, src, srcLen);
++ p->needInit = 1;
++
++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
++}
++
++void LzmaEnc_Finish(CLzmaEncHandle pp)
++{
++ #ifndef _7ZIP_ST
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ if (p->mtMode)
++ MatchFinderMt_ReleaseStream(&p->matchFinderMt);
++ #else
++ pp = pp;
++ #endif
++}
++
++typedef struct
++{
++ ISeqOutStream funcTable;
++ Byte *data;
++ SizeT rem;
++ Bool overflow;
++} CSeqOutStreamBuf;
++
++static size_t MyWrite(void *pp, const void *data, size_t size)
++{
++ CSeqOutStreamBuf *p = (CSeqOutStreamBuf *)pp;
++ if (p->rem < size)
++ {
++ size = p->rem;
++ p->overflow = True;
++ }
++ memcpy(p->data, data, size);
++ p->rem -= size;
++ p->data += size;
++ return size;
++}
++
++
++UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
++{
++ const CLzmaEnc *p = (CLzmaEnc *)pp;
++ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
++}
++
++const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp)
++{
++ const CLzmaEnc *p = (CLzmaEnc *)pp;
++ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
++}
++
++SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
++ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ UInt64 nowPos64;
++ SRes res;
++ CSeqOutStreamBuf outStream;
++
++ outStream.funcTable.Write = MyWrite;
++ outStream.data = dest;
++ outStream.rem = *destLen;
++ outStream.overflow = False;
++
++ p->writeEndMark = False;
++ p->finished = False;
++ p->result = SZ_OK;
++
++ if (reInit)
++ LzmaEnc_Init(p);
++ LzmaEnc_InitPrices(p);
++ nowPos64 = p->nowPos64;
++ RangeEnc_Init(&p->rc);
++ p->rc.outStream = &outStream.funcTable;
++
++ res = LzmaEnc_CodeOneBlock(p, True, desiredPackSize, *unpackSize);
++
++ *unpackSize = (UInt32)(p->nowPos64 - nowPos64);
++ *destLen -= outStream.rem;
++ if (outStream.overflow)
++ return SZ_ERROR_OUTPUT_EOF;
++
++ return res;
++}
++
++static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
++{
++ SRes res = SZ_OK;
++
++ #ifndef _7ZIP_ST
++ Byte allocaDummy[0x300];
++ int i = 0;
++ for (i = 0; i < 16; i++)
++ allocaDummy[i] = (Byte)i;
++ #endif
++
++ for (;;)
++ {
++ res = LzmaEnc_CodeOneBlock(p, False, 0, 0);
++ if (res != SZ_OK || p->finished != 0)
++ break;
++ if (progress != 0)
++ {
++ res = progress->Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc));
++ if (res != SZ_OK)
++ {
++ res = SZ_ERROR_PROGRESS;
++ break;
++ }
++ }
++ }
++ LzmaEnc_Finish(p);
++ return res;
++}
++
++SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig));
++ return LzmaEnc_Encode2((CLzmaEnc *)pp, progress);
++}
++
++SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ int i;
++ UInt32 dictSize = p->dictSize;
++ if (*size < LZMA_PROPS_SIZE)
++ return SZ_ERROR_PARAM;
++ *size = LZMA_PROPS_SIZE;
++ props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
++
++ for (i = 11; i <= 30; i++)
++ {
++ if (dictSize <= ((UInt32)2 << i))
++ {
++ dictSize = (2 << i);
++ break;
++ }
++ if (dictSize <= ((UInt32)3 << i))
++ {
++ dictSize = (3 << i);
++ break;
++ }
++ }
++
++ for (i = 0; i < 4; i++)
++ props[1 + i] = (Byte)(dictSize >> (8 * i));
++ return SZ_OK;
++}
++
++SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ SRes res;
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++
++ CSeqOutStreamBuf outStream;
++
++ LzmaEnc_SetInputBuf(p, src, srcLen);
++
++ outStream.funcTable.Write = MyWrite;
++ outStream.data = dest;
++ outStream.rem = *destLen;
++ outStream.overflow = False;
++
++ p->writeEndMark = writeEndMark;
++
++ p->rc.outStream = &outStream.funcTable;
++ res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig);
++ if (res == SZ_OK)
++ res = LzmaEnc_Encode2(p, progress);
++
++ *destLen -= outStream.rem;
++ if (outStream.overflow)
++ return SZ_ERROR_OUTPUT_EOF;
++ return res;
++}
++
++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc);
++ SRes res;
++ if (p == 0)
++ return SZ_ERROR_MEM;
++
++ res = LzmaEnc_SetProps(p, props);
++ if (res == SZ_OK)
++ {
++ res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize);
++ if (res == SZ_OK)
++ res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen,
++ writeEndMark, progress, alloc, allocBig);
++ }
++
++ LzmaEnc_Destroy(p, alloc, allocBig);
++ return res;
++}
+--- /dev/null
++++ b/lib/lzma/Makefile
+@@ -0,0 +1,7 @@
++lzma_compress-objs := LzFind.o LzmaEnc.o
++lzma_decompress-objs := LzmaDec.o
++
++obj-$(CONFIG_LZMA_COMPRESS) += lzma_compress.o
++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma_decompress.o
++
++EXTRA_CFLAGS += -Iinclude/linux -Iinclude/linux/lzma -include types.h
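
The 5-byte LZMA property header produced by LzmaEnc_WriteProperties() above packs lc/lp/pb into the first byte as (pb * 5 + lp) * 9 + lc and stores the (possibly rounded-up) dictionary size little-endian in the remaining four bytes. A minimal decoding sketch, not part of the patch, just to make that layout explicit:

#include <stdint.h>

struct lzma_props {
	unsigned int lc, lp, pb;
	uint32_t dict_size;
};

/* Unpack the header written by LzmaEnc_WriteProperties() above. */
static int lzma_parse_props(const uint8_t props[5], struct lzma_props *out)
{
	unsigned int d = props[0];
	int i;

	if (d >= 9 * 5 * 5)		/* valid range: lc 0..8, lp 0..4, pb 0..4 */
		return -1;

	out->lc = d % 9;
	d /= 9;
	out->lp = d % 5;
	out->pb = d / 5;

	out->dict_size = 0;
	for (i = 0; i < 4; i++)		/* little-endian, mirrors the encoder loop */
		out->dict_size |= (uint32_t)props[1 + i] << (8 * i);

	return 0;
}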
diff --git a/target/linux/generic/pending-4.14/532-jffs2_eofdetect.patch b/target/linux/generic/pending-4.14/532-jffs2_eofdetect.patch
new file mode 100644
index 0000000..e9952c6
--- /dev/null
+++ b/target/linux/generic/pending-4.14/532-jffs2_eofdetect.patch
@@ -0,0 +1,65 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: fs: jffs2: EOF marker
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ fs/jffs2/build.c | 10 ++++++++++
+ fs/jffs2/scan.c | 21 +++++++++++++++++++--
+ 2 files changed, 29 insertions(+), 2 deletions(-)
+
+--- a/fs/jffs2/build.c
++++ b/fs/jffs2/build.c
+@@ -117,6 +117,16 @@ static int jffs2_build_filesystem(struct
+ dbg_fsbuild("scanned flash completely\n");
+ jffs2_dbg_dump_block_lists_nolock(c);
+
++ if (c->flags & (1 << 7)) {
++ printk("%s(): unlocking the mtd device... ", __func__);
++ mtd_unlock(c->mtd, 0, c->mtd->size);
++ printk("done.\n");
++
++ printk("%s(): erasing all blocks after the end marker... ", __func__);
++ jffs2_erase_pending_blocks(c, -1);
++ printk("done.\n");
++ }
++
+ dbg_fsbuild("pass 1 starting\n");
+ c->flags |= JFFS2_SB_FLAG_BUILDING;
+ /* Now scan the directory tree, increasing nlink according to every dirent found. */
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -148,8 +148,14 @@ int jffs2_scan_medium(struct jffs2_sb_in
+ /* reset summary info for next eraseblock scan */
+ jffs2_sum_reset_collected(s);
+
+- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+- buf_size, s);
++ if (c->flags & (1 << 7)) {
++ if (mtd_block_isbad(c->mtd, jeb->offset))
++ ret = BLK_STATE_BADBLOCK;
++ else
++ ret = BLK_STATE_ALLFF;
++ } else
++ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
++ buf_size, s);
+
+ if (ret < 0)
+ goto out;
+@@ -561,6 +567,17 @@ full_scan:
+ return err;
+ }
+
++ if ((buf[0] == 0xde) &&
++ (buf[1] == 0xad) &&
++ (buf[2] == 0xc0) &&
++ (buf[3] == 0xde)) {
++ /* end of filesystem. erase everything after this point */
++ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
++ c->flags |= (1 << 7);
++
++ return BLK_STATE_ALLFF;
++ }
++
+ /* We temporarily use 'ofs' as a pointer into the buffer/jeb */
+ ofs = 0;
+ max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
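
Once the scanner above sees the byte sequence 0xde 0xad 0xc0 0xde at the start of an eraseblock it sets the flag bit, treats that block and everything after it as empty, and jffs2_build_filesystem() then unlocks the mtd device and erases those blocks. A hypothetical image-side helper (not part of the patch; the image is assumed to be padded so the marker lands on an eraseblock boundary) could append the marker like this:

#include <stdio.h>

static const unsigned char jffs2_eof_marker[4] = { 0xde, 0xad, 0xc0, 0xde };

/* Append the end-of-filesystem marker that jffs2_scan_eraseblock() above
 * checks for at the start of an eraseblock. */
static int append_jffs2_eof_marker(FILE *img)
{
	size_t n = fwrite(jffs2_eof_marker, 1, sizeof(jffs2_eof_marker), img);

	return n == sizeof(jffs2_eof_marker) ? 0 : -1;
}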
diff --git a/target/linux/generic/pending-4.14/551-ubifs-fix-default-compression-selection.patch b/target/linux/generic/pending-4.14/551-ubifs-fix-default-compression-selection.patch
new file mode 100644
index 0000000..4782fc9
--- /dev/null
+++ b/target/linux/generic/pending-4.14/551-ubifs-fix-default-compression-selection.patch
@@ -0,0 +1,37 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: fs: ubifs: fix default compression selection in ubifs
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ fs/ubifs/sb.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/fs/ubifs/sb.c
++++ b/fs/ubifs/sb.c
+@@ -63,6 +63,17 @@
+ /* Default time granularity in nanoseconds */
+ #define DEFAULT_TIME_GRAN 1000000000
+
++static int get_default_compressor(void)
++{
++ if (ubifs_compr_present(UBIFS_COMPR_LZO))
++ return UBIFS_COMPR_LZO;
++
++ if (ubifs_compr_present(UBIFS_COMPR_ZLIB))
++ return UBIFS_COMPR_ZLIB;
++
++ return UBIFS_COMPR_NONE;
++}
++
+ /**
+ * create_default_filesystem - format empty UBI volume.
+ * @c: UBIFS file-system description object
+@@ -186,7 +197,7 @@ static int create_default_filesystem(str
+ if (c->mount_opts.override_compr)
+ sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
+ else
+- sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);
++ sup->default_compr = cpu_to_le16(get_default_compressor());
+
+ generate_random_uuid(sup->uuid);
+
diff --git a/target/linux/generic/pending-4.14/600-netfilter_conntrack_flush.patch b/target/linux/generic/pending-4.14/600-netfilter_conntrack_flush.patch
new file mode 100644
index 0000000..f39ed62
--- /dev/null
+++ b/target/linux/generic/pending-4.14/600-netfilter_conntrack_flush.patch
@@ -0,0 +1,95 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: add support for flushing conntrack via /proc
+
+lede-commit 8193bbe59a74d34d6a26d4a8cb857b1952905314
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/netfilter/nf_conntrack_standalone.c | 59 ++++++++++++++++++++++++++++++++-
+ 1 file changed, 58 insertions(+), 1 deletion(-)
+
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -17,6 +17,7 @@
+ #include <linux/percpu.h>
+ #include <linux/netdevice.h>
+ #include <linux/security.h>
++#include <linux/inet.h>
+ #include <net/net_namespace.h>
+ #ifdef CONFIG_SYSCTL
+ #include <linux/sysctl.h>
+@@ -377,10 +378,66 @@ static int ct_open(struct inode *inode,
+ sizeof(struct ct_iter_state));
+ }
+
++struct kill_request {
++ u16 family;
++ union nf_inet_addr addr;
++};
++
++static int kill_matching(struct nf_conn *i, void *data)
++{
++ struct kill_request *kr = data;
++ struct nf_conntrack_tuple *t1 = &i->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
++ struct nf_conntrack_tuple *t2 = &i->tuplehash[IP_CT_DIR_REPLY].tuple;
++
++ if (!kr->family)
++ return 1;
++
++ if (t1->src.l3num != kr->family)
++ return 0;
++
++ return (nf_inet_addr_cmp(&kr->addr, &t1->src.u3) ||
++ nf_inet_addr_cmp(&kr->addr, &t1->dst.u3) ||
++ nf_inet_addr_cmp(&kr->addr, &t2->src.u3) ||
++ nf_inet_addr_cmp(&kr->addr, &t2->dst.u3));
++}
++
++static ssize_t ct_file_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct seq_file *seq = file->private_data;
++ struct net *net = seq_file_net(seq);
++ struct kill_request kr = { };
++ char req[INET6_ADDRSTRLEN] = { };
++
++ if (count == 0)
++ return 0;
++
++ if (count >= INET6_ADDRSTRLEN)
++ count = INET6_ADDRSTRLEN - 1;
++
++ if (copy_from_user(req, buf, count))
++ return -EFAULT;
++
++ if (strnchr(req, count, ':')) {
++ kr.family = AF_INET6;
++ if (!in6_pton(req, count, (void *)&kr.addr, '\n', NULL))
++ return -EINVAL;
++ } else if (strnchr(req, count, '.')) {
++ kr.family = AF_INET;
++ if (!in4_pton(req, count, (void *)&kr.addr, '\n', NULL))
++ return -EINVAL;
++ }
++
++ nf_ct_iterate_cleanup_net(net, kill_matching, &kr, 0, 0);
++
++ return count;
++}
++
+ static const struct file_operations ct_file_ops = {
+ .owner = THIS_MODULE,
+ .open = ct_open,
+ .read = seq_read,
++ .write = ct_file_write,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+ };
+@@ -484,7 +541,7 @@ static int nf_conntrack_standalone_init_
+ kuid_t root_uid;
+ kgid_t root_gid;
+
+- pde = proc_create("nf_conntrack", 0440, net->proc_net, &ct_file_ops);
++ pde = proc_create("nf_conntrack", 0660, net->proc_net, &ct_file_ops);
+ if (!pde)
+ goto out_nf_conntrack;
+
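
With the write handler above in place, flushing is a plain write to /proc/net/nf_conntrack: an IPv4 or IPv6 address kills every entry involving that address, and a string containing neither '.' nor ':' leaves kr.family at 0 and flushes the whole table. A minimal userspace sketch (not part of the patch):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Kill all conntrack entries that involve @addr, e.g. "192.168.1.20\n". */
static int conntrack_flush(const char *addr)
{
	int fd = open("/proc/net/nf_conntrack", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, addr, strlen(addr));
	close(fd);
	return ret < 0 ? -1 : 0;
}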
diff --git a/target/linux/generic/pending-4.14/610-netfilter_match_bypass_default_checks.patch b/target/linux/generic/pending-4.14/610-netfilter_match_bypass_default_checks.patch
new file mode 100644
index 0000000..0a3c0f9
--- /dev/null
+++ b/target/linux/generic/pending-4.14/610-netfilter_match_bypass_default_checks.patch
@@ -0,0 +1,110 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add a new version of my netfilter speedup patches for linux 2.6.39 and 3.0
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/uapi/linux/netfilter_ipv4/ip_tables.h | 1 +
+ net/ipv4/netfilter/ip_tables.c | 37 +++++++++++++++++++++++++++
+ 2 files changed, 38 insertions(+)
+
+--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h
++++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h
+@@ -89,6 +89,7 @@ struct ipt_ip {
+ #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
+ #define IPT_F_GOTO 0x02 /* Set if jump is a goto */
+ #define IPT_F_MASK 0x03 /* All possible flag bits mask. */
++#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */
+
+ /* Values for "inv" field in struct ipt_ip. */
+ #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -52,6 +52,9 @@ ip_packet_match(const struct iphdr *ip,
+ {
+ unsigned long ret;
+
++ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
++ return true;
++
+ if (NF_INVF(ipinfo, IPT_INV_SRCIP,
+ (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+ NF_INVF(ipinfo, IPT_INV_DSTIP,
+@@ -82,6 +85,29 @@ ip_packet_match(const struct iphdr *ip,
+ return true;
+ }
+
++static void
++ip_checkdefault(struct ipt_ip *ip)
++{
++ static const char iface_mask[IFNAMSIZ] = {};
++
++ if (ip->invflags || ip->flags & IPT_F_FRAG)
++ return;
++
++ if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0)
++ return;
++
++ if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0)
++ return;
++
++ if (ip->smsk.s_addr || ip->dmsk.s_addr)
++ return;
++
++ if (ip->proto)
++ return;
++
++ ip->flags |= IPT_F_NO_DEF_MATCH;
++}
++
+ static bool
+ ip_checkentry(const struct ipt_ip *ip)
+ {
+@@ -532,6 +558,8 @@ find_check_entry(struct ipt_entry *e, st
+ struct xt_mtchk_param mtpar;
+ struct xt_entry_match *ematch;
+
++ ip_checkdefault(&e->ip);
++
+ if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
+ return -ENOMEM;
+
+@@ -812,6 +840,7 @@ copy_entries_to_user(unsigned int total_
+ const struct xt_table_info *private = table->private;
+ int ret = 0;
+ const void *loc_cpu_entry;
++ u8 flags;
+
+ counters = alloc_counters(table);
+ if (IS_ERR(counters))
+@@ -839,6 +868,14 @@ copy_entries_to_user(unsigned int total_
+ goto free_counters;
+ }
+
++ flags = e->ip.flags & IPT_F_MASK;
++ if (copy_to_user(userptr + off
++ + offsetof(struct ipt_entry, ip.flags),
++ &flags, sizeof(flags)) != 0) {
++ ret = -EFAULT;
++ goto free_counters;
++ }
++
+ for (i = sizeof(struct ipt_entry);
+ i < e->target_offset;
+ i += m->u.match_size) {
+@@ -1219,12 +1256,15 @@ compat_copy_entry_to_user(struct ipt_ent
+ compat_uint_t origsize;
+ const struct xt_entry_match *ematch;
+ int ret = 0;
++ u8 flags = e->ip.flags & IPT_F_MASK;
+
+ origsize = *size;
+ ce = *dstptr;
+ if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
+ copy_to_user(&ce->counters, &counters[i],
+- sizeof(counters[i])) != 0)
++ sizeof(counters[i])) != 0 ||
++ copy_to_user(&ce->ip.flags, &flags,
++ sizeof(flags)) != 0)
+ return -EFAULT;
+
+ *dstptr += sizeof(struct compat_ipt_entry);
diff --git a/target/linux/generic/pending-4.14/611-netfilter_match_bypass_default_table.patch b/target/linux/generic/pending-4.14/611-netfilter_match_bypass_default_table.patch
new file mode 100644
index 0000000..11f07e1
--- /dev/null
+++ b/target/linux/generic/pending-4.14/611-netfilter_match_bypass_default_table.patch
@@ -0,0 +1,111 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: match bypass default table
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/ipv4/netfilter/ip_tables.c | 79 +++++++++++++++++++++++++++++++-----------
+ 1 file changed, 58 insertions(+), 21 deletions(-)
+
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -248,6 +248,33 @@ struct ipt_entry *ipt_next_entry(const s
+ return (void *)entry + entry->next_offset;
+ }
+
++static bool
++ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
++{
++ struct xt_entry_target *t;
++ struct xt_standard_target *st;
++
++ if (e->target_offset != sizeof(struct ipt_entry))
++ return false;
++
++ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
++ return false;
++
++ t = ipt_get_target(e);
++ if (t->u.kernel.target->target)
++ return false;
++
++ st = (struct xt_standard_target *) t;
++ if (st->verdict == XT_RETURN)
++ return false;
++
++ if (st->verdict >= 0)
++ return false;
++
++ *verdict = (unsigned)(-st->verdict) - 1;
++ return true;
++}
++
+ /* Returns one of the generic firewall policies, like NF_ACCEPT. */
+ unsigned int
+ ipt_do_table(struct sk_buff *skb,
+@@ -268,24 +295,8 @@ ipt_do_table(struct sk_buff *skb,
+ unsigned int addend;
+
+ /* Initialization */
+- stackidx = 0;
+- ip = ip_hdr(skb);
+- indev = state->in ? state->in->name : nulldevname;
+- outdev = state->out ? state->out->name : nulldevname;
+- /* We handle fragments by dealing with the first fragment as
+- * if it was a normal packet. All other fragments are treated
+- * normally, except that they will NEVER match rules that ask
+- * things we don't know, ie. tcp syn flag or ports). If the
+- * rule is also a fragment-specific rule, non-fragments won't
+- * match it. */
+- acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+- acpar.thoff = ip_hdrlen(skb);
+- acpar.hotdrop = false;
+- acpar.state = state;
+-
+ WARN_ON(!(table->valid_hooks & (1 << hook)));
+ local_bh_disable();
+- addend = xt_write_recseq_begin();
+ private = table->private;
+ cpu = smp_processor_id();
+ /*
+@@ -294,6 +305,23 @@ ipt_do_table(struct sk_buff *skb,
+ */
+ smp_read_barrier_depends();
+ table_base = private->entries;
++
++ e = get_entry(table_base, private->hook_entry[hook]);
++ if (ipt_handle_default_rule(e, &verdict)) {
++ struct xt_counters *counter;
++
++ counter = xt_get_this_cpu_counter(&e->counters);
++ ADD_COUNTER(*counter, skb->len, 1);
++ local_bh_enable();
++ return verdict;
++ }
++
++ stackidx = 0;
++ ip = ip_hdr(skb);
++ indev = state->in ? state->in->name : nulldevname;
++ outdev = state->out ? state->out->name : nulldevname;
++
++ addend = xt_write_recseq_begin();
+ jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
+
+ /* Switch to alternate jumpstack if we're being invoked via TEE.
+@@ -306,7 +334,16 @@ ipt_do_table(struct sk_buff *skb,
+ if (static_key_false(&xt_tee_enabled))
+ jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
+
+- e = get_entry(table_base, private->hook_entry[hook]);
++ /* We handle fragments by dealing with the first fragment as
++ * if it was a normal packet. All other fragments are treated
++ * normally, except that they will NEVER match rules that ask
++ * things we don't know, ie. tcp syn flag or ports). If the
++ * rule is also a fragment-specific rule, non-fragments won't
++ * match it. */
++ acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
++ acpar.thoff = ip_hdrlen(skb);
++ acpar.hotdrop = false;
++ acpar.state = state;
+
+ do {
+ const struct xt_entry_target *t;
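
ipt_handle_default_rule() above relies on the xtables convention that a standard target stores built-in verdicts as -(NF_verdict) - 1, keeps XT_RETURN as -NF_REPEAT - 1, and uses non-negative values for jump offsets, which is why a negative, non-RETURN verdict can be unwrapped with -verdict - 1. A small sketch of that encoding (illustration only, not part of the patch):

#include <linux/netfilter.h>		/* NF_DROP, NF_ACCEPT */

/* Userspace iptables encodes a builtin verdict like this... */
static inline int xt_std_verdict_encode(unsigned int nf_verdict)
{
	return -(int)nf_verdict - 1;	/* NF_DROP -> -1, NF_ACCEPT -> -2 */
}

/* ...and ipt_handle_default_rule() above reverses it like this. */
static inline unsigned int xt_std_verdict_decode(int stored)
{
	return (unsigned int)(-stored) - 1;
}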
diff --git a/target/linux/generic/pending-4.14/612-netfilter_match_reduce_memory_access.patch b/target/linux/generic/pending-4.14/612-netfilter_match_reduce_memory_access.patch
new file mode 100644
index 0000000..183c74c
--- /dev/null
+++ b/target/linux/generic/pending-4.14/612-netfilter_match_reduce_memory_access.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: reduce match memory access
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/ipv4/netfilter/ip_tables.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -55,9 +55,9 @@ ip_packet_match(const struct iphdr *ip,
+ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+ return true;
+
+- if (NF_INVF(ipinfo, IPT_INV_SRCIP,
++ if (NF_INVF(ipinfo, IPT_INV_SRCIP, ipinfo->smsk.s_addr &&
+ (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+- NF_INVF(ipinfo, IPT_INV_DSTIP,
++ NF_INVF(ipinfo, IPT_INV_DSTIP, ipinfo->dmsk.s_addr &&
+ (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
+ return false;
+
diff --git a/target/linux/generic/pending-4.14/613-netfilter_optional_tcp_window_check.patch b/target/linux/generic/pending-4.14/613-netfilter_optional_tcp_window_check.patch
new file mode 100644
index 0000000..3ded901
--- /dev/null
+++ b/target/linux/generic/pending-4.14/613-netfilter_optional_tcp_window_check.patch
@@ -0,0 +1,44 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: optional tcp window check
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/netfilter/nf_conntrack_proto_tcp.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -33,6 +33,9 @@
+ #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+ #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+
++/* Do not check the TCP window for incoming packets */
++static int nf_ct_tcp_no_window_check __read_mostly = 1;
++
+ /* "Be conservative in what you do,
+ be liberal in what you accept from others."
+ If it's non-zero, we mark only out of window RST segments as INVALID. */
+@@ -505,6 +508,9 @@ static bool tcp_in_window(const struct n
+ s32 receiver_offset;
+ bool res, in_recv_win;
+
++ if (nf_ct_tcp_no_window_check)
++ return true;
++
+ /*
+ * Get the required data from the packet.
+ */
+@@ -1486,6 +1492,13 @@ static struct ctl_table tcp_sysctl_table
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
++ {
++ .procname = "nf_conntrack_tcp_no_window_check",
++ .data = &nf_ct_tcp_no_window_check,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
+ { }
+ };
+ #endif /* CONFIG_SYSCTL */
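
The new knob defaults to 1 (skip the window check) and is registered in the nf_conntrack sysctl table, so it should appear as net.netfilter.nf_conntrack_tcp_no_window_check; the proc path below is an assumption based on where the other nf_conntrack TCP sysctls live, it is not spelled out in the patch. A small sketch for toggling it from userspace:

#include <stdio.h>

/* enforce = 1 restores strict window tracking, enforce = 0 keeps the
 * patch default of skipping the check. */
static int set_tcp_window_check(int enforce)
{
	const char *path =
		"/proc/sys/net/netfilter/nf_conntrack_tcp_no_window_check";
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", enforce ? 0 : 1);
	return fclose(f) ? -1 : 0;
}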
diff --git a/target/linux/generic/pending-4.14/616-net_optimize_xfrm_calls.patch b/target/linux/generic/pending-4.14/616-net_optimize_xfrm_calls.patch
new file mode 100644
index 0000000..c64694e
--- /dev/null
+++ b/target/linux/generic/pending-4.14/616-net_optimize_xfrm_calls.patch
@@ -0,0 +1,20 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add a small xfrm related performance optimization
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/netfilter/nf_nat_core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -90,6 +90,9 @@ int nf_xfrm_me_harder(struct net *net, s
+ struct dst_entry *dst;
+ int err;
+
++ if (skb->dev && !dev_net(skb->dev)->xfrm.policy_count[XFRM_POLICY_OUT])
++ return 0;
++
+ err = xfrm_decode_session(skb, &fl, family);
+ if (err < 0)
+ return err;
diff --git a/target/linux/generic/pending-4.14/630-packet_socket_type.patch b/target/linux/generic/pending-4.14/630-packet_socket_type.patch
new file mode 100644
index 0000000..7a0f924
--- /dev/null
+++ b/target/linux/generic/pending-4.14/630-packet_socket_type.patch
@@ -0,0 +1,138 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: net: add an optimization for dealing with raw sockets
+
+lede-commit: 4898039703d7315f0f3431c860123338ec3be0f6
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/uapi/linux/if_packet.h | 3 +++
+ net/packet/af_packet.c | 34 +++++++++++++++++++++++++++-------
+ net/packet/internal.h | 1 +
+ 3 files changed, 31 insertions(+), 7 deletions(-)
+
+--- a/include/uapi/linux/if_packet.h
++++ b/include/uapi/linux/if_packet.h
+@@ -32,6 +32,8 @@ struct sockaddr_ll {
+ #define PACKET_KERNEL 7 /* To kernel space */
+ /* Unused, PACKET_FASTROUTE and PACKET_LOOPBACK are invisible to user space */
+ #define PACKET_FASTROUTE 6 /* Fastrouted frame */
++#define PACKET_MASK_ANY 0xffffffff /* mask for packet type bits */
++
+
+ /* Packet socket options */
+
+@@ -57,6 +59,7 @@ struct sockaddr_ll {
+ #define PACKET_QDISC_BYPASS 20
+ #define PACKET_ROLLOVER_STATS 21
+ #define PACKET_FANOUT_DATA 22
++#define PACKET_RECV_TYPE 23
+
+ #define PACKET_FANOUT_HASH 0
+ #define PACKET_FANOUT_LB 1
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1829,6 +1829,7 @@ static int packet_rcv_spkt(struct sk_buf
+ {
+ struct sock *sk;
+ struct sockaddr_pkt *spkt;
++ struct packet_sock *po;
+
+ /*
+ * When we registered the protocol we saved the socket in the data
+@@ -1836,6 +1837,7 @@ static int packet_rcv_spkt(struct sk_buf
+ */
+
+ sk = pt->af_packet_priv;
++ po = pkt_sk(sk);
+
+ /*
+ * Yank back the headers [hope the device set this
+@@ -1848,7 +1850,7 @@ static int packet_rcv_spkt(struct sk_buf
+ * so that this procedure is noop.
+ */
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
+ goto out;
+
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+@@ -2075,12 +2077,12 @@ static int packet_rcv(struct sk_buff *sk
+ unsigned int snaplen, res;
+ bool is_drop_n_account = false;
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
+- goto drop;
+-
+ sk = pt->af_packet_priv;
+ po = pkt_sk(sk);
+
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
++ goto drop;
++
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+ goto drop;
+
+@@ -2206,12 +2208,12 @@ static int tpacket_rcv(struct sk_buff *s
+ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
+ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
+- goto drop;
+-
+ sk = pt->af_packet_priv;
+ po = pkt_sk(sk);
+
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
++ goto drop;
++
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+ goto drop;
+
+@@ -3252,6 +3254,7 @@ static int packet_create(struct net *net
+ mutex_init(&po->pg_vec_lock);
+ po->rollover = NULL;
+ po->prot_hook.func = packet_rcv;
++ po->pkt_type = PACKET_MASK_ANY & ~(1 << PACKET_LOOPBACK);
+
+ if (sock->type == SOCK_PACKET)
+ po->prot_hook.func = packet_rcv_spkt;
+@@ -3838,6 +3841,16 @@ packet_setsockopt(struct socket *sock, i
+ po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
+ return 0;
+ }
++ case PACKET_RECV_TYPE:
++ {
++ unsigned int val;
++ if (optlen != sizeof(val))
++ return -EINVAL;
++ if (copy_from_user(&val, optval, sizeof(val)))
++ return -EFAULT;
++ po->pkt_type = val & ~BIT(PACKET_LOOPBACK);
++ return 0;
++ }
+ default:
+ return -ENOPROTOOPT;
+ }
+@@ -3891,6 +3904,13 @@ static int packet_getsockopt(struct sock
+ case PACKET_VNET_HDR:
+ val = po->has_vnet_hdr;
+ break;
++ case PACKET_RECV_TYPE:
++ if (len > sizeof(unsigned int))
++ len = sizeof(unsigned int);
++ val = po->pkt_type;
++
++ data = &val;
++ break;
+ case PACKET_VERSION:
+ val = po->tp_version;
+ break;
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -132,6 +132,7 @@ struct packet_sock {
+ struct net_device __rcu *cached_dev;
+ int (*xmit)(struct sk_buff *skb);
+ struct packet_type prot_hook ____cacheline_aligned_in_smp;
++ unsigned int pkt_type;
+ };
+
+ static struct packet_sock *pkt_sk(struct sock *sk)
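
A userspace consumer selects which skb->pkt_type values it wants via the new PACKET_RECV_TYPE option; the value is a bitmask of (1 << PACKET_*) types and the kernel always strips PACKET_LOOPBACK. A minimal sketch (not part of the patch; the fallback define only matters until the uapi header change above is installed):

#include <sys/socket.h>
#include <linux/if_packet.h>

#ifndef PACKET_RECV_TYPE
#define PACKET_RECV_TYPE 23		/* value added by the uapi hunk above */
#endif

/* Only deliver host- and broadcast-addressed frames to this socket. */
static int packet_limit_recv_types(int fd)
{
	unsigned int mask = (1 << PACKET_HOST) | (1 << PACKET_BROADCAST);

	return setsockopt(fd, SOL_PACKET, PACKET_RECV_TYPE, &mask, sizeof(mask));
}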
diff --git a/target/linux/generic/pending-4.14/650-pppoe_header_pad.patch b/target/linux/generic/pending-4.14/650-pppoe_header_pad.patch
new file mode 100644
index 0000000..3115073
--- /dev/null
+++ b/target/linux/generic/pending-4.14/650-pppoe_header_pad.patch
@@ -0,0 +1,29 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: pppoe: add extra padding for the header (useful for drivers that need headroom)
+
+lede-commit 6517a757ec711fc3354b857e273e2621042f3c7a
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/net/ppp/pppoe.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -861,7 +861,7 @@ static int pppoe_sendmsg(struct socket *
+ goto end;
+
+
+- skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
++ skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32 + NET_SKB_PAD,
+ 0, GFP_KERNEL);
+ if (!skb) {
+ error = -ENOMEM;
+@@ -869,7 +869,7 @@ static int pppoe_sendmsg(struct socket *
+ }
+
+ /* Reserve space for headers. */
+- skb_reserve(skb, dev->hard_header_len);
++ skb_reserve(skb, dev->hard_header_len + NET_SKB_PAD);
+ skb_reset_network_header(skb);
+
+ skb->dev = dev;
diff --git a/target/linux/generic/pending-4.14/655-increase_skb_pad.patch b/target/linux/generic/pending-4.14/655-increase_skb_pad.patch
new file mode 100644
index 0000000..4bc8a5e
--- /dev/null
+++ b/target/linux/generic/pending-4.14/655-increase_skb_pad.patch
@@ -0,0 +1,20 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add a few patches for avoiding unnecessary skb reallocations - significantly improves ethernet<->wireless performance
+
+lede-commit: 6f89cffc9add6939d44a6b54cf9a5e77849aa7fd
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/linux/skbuff.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2456,7 +2456,7 @@ static inline int pskb_network_may_pull(
+ * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
+ */
+ #ifndef NET_SKB_PAD
+-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
+ #endif
+
+ int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/pending-4.14/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch b/target/linux/generic/pending-4.14/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch
new file mode 100644
index 0000000..a114acd
--- /dev/null
+++ b/target/linux/generic/pending-4.14/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch
@@ -0,0 +1,500 @@
+From: Steven Barth <steven@midlink.org>
+Subject: Add support for MAP-E FMRs (mesh mode)
+
+MAP-E FMRs (draft-ietf-softwire-map-10) are rules for IPv4 communication
+between MAP CEs (mesh mode) without the need to forward such data to a
+border relay. This is similar to how 6rd works but for IPv4 over IPv6.
+
+Signed-off-by: Steven Barth <cyrus@openwrt.org>
+---
+ include/net/ip6_tunnel.h | 13 ++
+ include/uapi/linux/if_tunnel.h | 13 ++
+ net/ipv6/ip6_tunnel.c | 276 +++++++++++++++++++++++++++++++++++++++--
+ 3 files changed, 291 insertions(+), 11 deletions(-)
+
+--- a/include/net/ip6_tunnel.h
++++ b/include/net/ip6_tunnel.h
+@@ -18,6 +18,18 @@
+ /* determine capability on a per-packet basis */
+ #define IP6_TNL_F_CAP_PER_PACKET 0x40000
+
++/* IPv6 tunnel FMR */
++struct __ip6_tnl_fmr {
++ struct __ip6_tnl_fmr *next; /* next fmr in list */
++ struct in6_addr ip6_prefix;
++ struct in_addr ip4_prefix;
++
++ __u8 ip6_prefix_len;
++ __u8 ip4_prefix_len;
++ __u8 ea_len;
++ __u8 offset;
++};
++
+ struct __ip6_tnl_parm {
+ char name[IFNAMSIZ]; /* name of tunnel device */
+ int link; /* ifindex of underlying L2 interface */
+@@ -29,6 +41,7 @@ struct __ip6_tnl_parm {
+ __u32 flags; /* tunnel flags */
+ struct in6_addr laddr; /* local tunnel end-point address */
+ struct in6_addr raddr; /* remote tunnel end-point address */
++ struct __ip6_tnl_fmr *fmrs; /* FMRs */
+
+ __be16 i_flags;
+ __be16 o_flags;
+--- a/include/uapi/linux/if_tunnel.h
++++ b/include/uapi/linux/if_tunnel.h
+@@ -77,10 +77,23 @@ enum {
+ IFLA_IPTUN_ENCAP_DPORT,
+ IFLA_IPTUN_COLLECT_METADATA,
+ IFLA_IPTUN_FWMARK,
++ IFLA_IPTUN_FMRS,
+ __IFLA_IPTUN_MAX,
+ };
+ #define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
+
++enum {
++ IFLA_IPTUN_FMR_UNSPEC,
++ IFLA_IPTUN_FMR_IP6_PREFIX,
++ IFLA_IPTUN_FMR_IP4_PREFIX,
++ IFLA_IPTUN_FMR_IP6_PREFIX_LEN,
++ IFLA_IPTUN_FMR_IP4_PREFIX_LEN,
++ IFLA_IPTUN_FMR_EA_LEN,
++ IFLA_IPTUN_FMR_OFFSET,
++ __IFLA_IPTUN_FMR_MAX,
++};
++#define IFLA_IPTUN_FMR_MAX (__IFLA_IPTUN_FMR_MAX - 1)
++
+ enum tunnel_encap_types {
+ TUNNEL_ENCAP_NONE,
+ TUNNEL_ENCAP_FOU,
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -16,6 +16,8 @@
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
++ * Changes:
++ * Steven Barth <cyrus@openwrt.org>: MAP-E FMR support
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -72,9 +74,9 @@ static bool log_ecn_error = true;
+ module_param(log_ecn_error, bool, 0644);
+ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+-static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
++static u32 HASH(const struct in6_addr *addr)
+ {
+- u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
++ u32 hash = ipv6_addr_hash(addr);
+
+ return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
+ }
+@@ -141,20 +143,29 @@ static struct net_device_stats *ip6_get_
+ static struct ip6_tnl *
+ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
+ {
+- unsigned int hash = HASH(remote, local);
++ unsigned int hash = HASH(local);
+ struct ip6_tnl *t;
+ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+ struct in6_addr any;
++ struct __ip6_tnl_fmr *fmr;
+
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+- if (ipv6_addr_equal(local, &t->parms.laddr) &&
+- ipv6_addr_equal(remote, &t->parms.raddr) &&
+- (t->dev->flags & IFF_UP))
++ if (!ipv6_addr_equal(local, &t->parms.laddr) ||
++ !(t->dev->flags & IFF_UP))
++ continue;
++
++ if (ipv6_addr_equal(remote, &t->parms.raddr))
+ return t;
++
++ for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
++ if (ipv6_prefix_equal(remote, &fmr->ip6_prefix,
++ fmr->ip6_prefix_len))
++ return t;
++ }
+ }
+
+ memset(&any, 0, sizeof(any));
+- hash = HASH(&any, local);
++ hash = HASH(local);
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (ipv6_addr_equal(local, &t->parms.laddr) &&
+ ipv6_addr_any(&t->parms.raddr) &&
+@@ -162,7 +173,7 @@ ip6_tnl_lookup(struct net *net, const st
+ return t;
+ }
+
+- hash = HASH(remote, &any);
++ hash = HASH(&any);
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+ ipv6_addr_any(&t->parms.laddr) &&
+@@ -202,7 +213,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n,
+
+ if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
+ prio = 1;
+- h = HASH(remote, local);
++ h = HASH(local);
+ }
+ return &ip6n->tnls[prio][h];
+ }
+@@ -380,6 +391,12 @@ ip6_tnl_dev_uninit(struct net_device *de
+ struct net *net = t->net;
+ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+
++ while (t->parms.fmrs) {
++ struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
++ kfree(t->parms.fmrs);
++ t->parms.fmrs = next;
++ }
++
+ if (dev == ip6n->fb_tnl_dev)
+ RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
+ else
+@@ -776,6 +793,107 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
+ }
+ EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
+
++/**
++ * ip4ip6_fmr_calc - calculate target / source IPv6-address based on FMR
++ * @dest: destination IPv6 address buffer
++ * @iph: IPv4 header of the packet, parsed no further than @end
++ * @fmr: MAP FMR
++ * @xmit: Calculate for xmit or rcv
++ **/
++static void ip4ip6_fmr_calc(struct in6_addr *dest,
++ const struct iphdr *iph, const uint8_t *end,
++ const struct __ip6_tnl_fmr *fmr, bool xmit)
++{
++ int psidlen = fmr->ea_len - (32 - fmr->ip4_prefix_len);
++ u8 *portp = NULL;
++ bool use_dest_addr;
++ const struct iphdr *dsth = iph;
++
++ if ((u8*)dsth >= end)
++ return;
++
++ /* find significant IP header */
++ if (iph->protocol == IPPROTO_ICMP) {
++ struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
++ if (ih && ((u8*)&ih[1]) <= end && (
++ ih->type == ICMP_DEST_UNREACH ||
++ ih->type == ICMP_SOURCE_QUENCH ||
++ ih->type == ICMP_TIME_EXCEEDED ||
++ ih->type == ICMP_PARAMETERPROB ||
++ ih->type == ICMP_REDIRECT))
++ dsth = (const struct iphdr*)&ih[1];
++ }
++
++ /* in xmit-path use dest port by default and source port only if
++ this is an ICMP reply to something else; vice versa in rcv-path */
++ use_dest_addr = (xmit && dsth == iph) || (!xmit && dsth != iph);
++
++ /* get dst port */
++ if (((u8*)&dsth[1]) <= end && (
++ dsth->protocol == IPPROTO_UDP ||
++ dsth->protocol == IPPROTO_TCP ||
++ dsth->protocol == IPPROTO_SCTP ||
++ dsth->protocol == IPPROTO_DCCP)) {
++ /* for UDP, TCP, SCTP and DCCP source and dest port
++ follow IPv4 header directly */
++ portp = ((u8*)dsth) + dsth->ihl * 4;
++
++ if (use_dest_addr)
++ portp += sizeof(u16);
++ } else if (iph->protocol == IPPROTO_ICMP) {
++ struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
++
++ /* use icmp identifier as port */
++ if (((u8*)&ih) <= end && (
++ (use_dest_addr && (
++ ih->type == ICMP_ECHOREPLY ||
++ ih->type == ICMP_TIMESTAMPREPLY ||
++ ih->type == ICMP_INFO_REPLY ||
++ ih->type == ICMP_ADDRESSREPLY)) ||
++ (!use_dest_addr && (
++ ih->type == ICMP_ECHO ||
++ ih->type == ICMP_TIMESTAMP ||
++ ih->type == ICMP_INFO_REQUEST ||
++ ih->type == ICMP_ADDRESS)
++ )))
++ portp = (u8*)&ih->un.echo.id;
++ }
++
++ if ((portp && &portp[2] <= end) || psidlen == 0) {
++ int frombyte = fmr->ip6_prefix_len / 8;
++ int fromrem = fmr->ip6_prefix_len % 8;
++ int bytes = sizeof(struct in6_addr) - frombyte;
++ const u32 *addr = (use_dest_addr) ? &iph->daddr : &iph->saddr;
++ u64 eabits = ((u64)ntohl(*addr)) << (32 + fmr->ip4_prefix_len);
++ u64 t = 0;
++
++ /* extract PSID from port and add it to eabits */
++ u16 psidbits = 0;
++ if (psidlen > 0) {
++ psidbits = ((u16)portp[0]) << 8 | ((u16)portp[1]);
++ psidbits >>= 16 - psidlen - fmr->offset;
++ psidbits = (u16)(psidbits << (16 - psidlen));
++ eabits |= ((u64)psidbits) << (48 - (fmr->ea_len - psidlen));
++ }
++
++ /* rewrite destination address */
++ *dest = fmr->ip6_prefix;
++ memcpy(&dest->s6_addr[10], addr, sizeof(*addr));
++ dest->s6_addr16[7] = htons(psidbits >> (16 - psidlen));
++
++ if (bytes > sizeof(u64))
++ bytes = sizeof(u64);
++
++ /* insert eabits */
++ memcpy(&t, &dest->s6_addr[frombyte], bytes);
++ t = be64_to_cpu(t) & ~(((((u64)1) << fmr->ea_len) - 1)
++ << (64 - fmr->ea_len - fromrem));
++ t = cpu_to_be64(t | (eabits >> fromrem));
++ memcpy(&dest->s6_addr[frombyte], &t, bytes);
++ }
++}
++
++
+ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ const struct tnl_ptk_info *tpi,
+ struct metadata_dst *tun_dst,
+@@ -828,6 +946,27 @@ static int __ip6_tnl_rcv(struct ip6_tnl
+ skb_reset_network_header(skb);
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+
++ if (tpi->proto == htons(ETH_P_IP) &&
++ !ipv6_addr_equal(&ipv6h->saddr, &tunnel->parms.raddr)) {
++ /* Packet didn't come from BR, so lookup FMR */
++ struct __ip6_tnl_fmr *fmr;
++ struct in6_addr expected = tunnel->parms.raddr;
++ for (fmr = tunnel->parms.fmrs; fmr; fmr = fmr->next)
++ if (ipv6_prefix_equal(&ipv6h->saddr,
++ &fmr->ip6_prefix, fmr->ip6_prefix_len))
++ break;
++
++ /* Check that IPv6 matches IPv4 source to prevent spoofing */
++ if (fmr)
++ ip4ip6_fmr_calc(&expected, ip_hdr(skb),
++ skb_tail_pointer(skb), fmr, false);
++
++ if (!ipv6_addr_equal(&ipv6h->saddr, &expected)) {
++ rcu_read_unlock();
++ goto drop;
++ }
++ }
++
+ __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
+
+ err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
+@@ -959,6 +1098,7 @@ static void init_tel_txopt(struct ipv6_t
+ opt->ops.opt_nflen = 8;
+ }
+
++
+ /**
+ * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
+ * @t: the outgoing tunnel device
+@@ -1295,6 +1435,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str
+ {
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++ struct __ip6_tnl_fmr *fmr;
+ int encap_limit = -1;
+ __u16 offset;
+ struct flowi6 fl6;
+@@ -1357,6 +1498,18 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str
+
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
++ /* try to find matching FMR */
++ for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
++ unsigned mshift = 32 - fmr->ip4_prefix_len;
++ if (ntohl(fmr->ip4_prefix.s_addr) >> mshift ==
++ ntohl(ip_hdr(skb)->daddr) >> mshift)
++ break;
++ }
++
++ /* change dstaddr according to FMR */
++ if (fmr)
++ ip4ip6_fmr_calc(&fl6.daddr, ip_hdr(skb), skb_tail_pointer(skb), fmr, true);
++
+ if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
+ return -1;
+
+@@ -1485,6 +1638,14 @@ ip6_tnl_change(struct ip6_tnl *t, const
+ t->parms.link = p->link;
+ t->parms.proto = p->proto;
+ t->parms.fwmark = p->fwmark;
++
++ while (t->parms.fmrs) {
++ struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
++ kfree(t->parms.fmrs);
++ t->parms.fmrs = next;
++ }
++ t->parms.fmrs = p->fmrs;
++
+ dst_cache_reset(&t->dst_cache);
+ ip6_tnl_link_config(t);
+ return 0;
+@@ -1523,6 +1684,7 @@ ip6_tnl_parm_from_user(struct __ip6_tnl_
+ p->flowinfo = u->flowinfo;
+ p->link = u->link;
+ p->proto = u->proto;
++ p->fmrs = NULL;
+ memcpy(p->name, u->name, sizeof(u->name));
+ }
+
+@@ -1904,6 +2066,15 @@ static int ip6_tnl_validate(struct nlatt
+ return 0;
+ }
+
++static const struct nla_policy ip6_tnl_fmr_policy[IFLA_IPTUN_FMR_MAX + 1] = {
++ [IFLA_IPTUN_FMR_IP6_PREFIX] = { .len = sizeof(struct in6_addr) },
++ [IFLA_IPTUN_FMR_IP4_PREFIX] = { .len = sizeof(struct in_addr) },
++ [IFLA_IPTUN_FMR_IP6_PREFIX_LEN] = { .type = NLA_U8 },
++ [IFLA_IPTUN_FMR_IP4_PREFIX_LEN] = { .type = NLA_U8 },
++ [IFLA_IPTUN_FMR_EA_LEN] = { .type = NLA_U8 },
++ [IFLA_IPTUN_FMR_OFFSET] = { .type = NLA_U8 }
++};
++
+ static void ip6_tnl_netlink_parms(struct nlattr *data[],
+ struct __ip6_tnl_parm *parms)
+ {
+@@ -1941,6 +2112,46 @@ static void ip6_tnl_netlink_parms(struct
+
+ if (data[IFLA_IPTUN_FWMARK])
+ parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
++
++ if (data[IFLA_IPTUN_FMRS]) {
++ unsigned rem;
++ struct nlattr *fmr;
++ nla_for_each_nested(fmr, data[IFLA_IPTUN_FMRS], rem) {
++ struct nlattr *fmrd[IFLA_IPTUN_FMR_MAX + 1], *c;
++ struct __ip6_tnl_fmr *nfmr;
++
++ nla_parse_nested(fmrd, IFLA_IPTUN_FMR_MAX,
++ fmr, ip6_tnl_fmr_policy, NULL);
++
++ if (!(nfmr = kzalloc(sizeof(*nfmr), GFP_KERNEL)))
++ continue;
++
++ nfmr->offset = 6;
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX]))
++ nla_memcpy(&nfmr->ip6_prefix, fmrd[IFLA_IPTUN_FMR_IP6_PREFIX],
++ sizeof(nfmr->ip6_prefix));
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX]))
++ nla_memcpy(&nfmr->ip4_prefix, fmrd[IFLA_IPTUN_FMR_IP4_PREFIX],
++ sizeof(nfmr->ip4_prefix));
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX_LEN]))
++ nfmr->ip6_prefix_len = nla_get_u8(c);
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX_LEN]))
++ nfmr->ip4_prefix_len = nla_get_u8(c);
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_EA_LEN]))
++ nfmr->ea_len = nla_get_u8(c);
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_OFFSET]))
++ nfmr->offset = nla_get_u8(c);
++
++ nfmr->next = parms->fmrs;
++ parms->fmrs = nfmr;
++ }
++ }
+ }
+
+ static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
+@@ -2052,6 +2263,12 @@ static void ip6_tnl_dellink(struct net_d
+
+ static size_t ip6_tnl_get_size(const struct net_device *dev)
+ {
++ const struct ip6_tnl *t = netdev_priv(dev);
++ struct __ip6_tnl_fmr *c;
++ int fmrs = 0;
++ for (c = t->parms.fmrs; c; c = c->next)
++ ++fmrs;
++
+ return
+ /* IFLA_IPTUN_LINK */
+ nla_total_size(4) +
+@@ -2081,6 +2298,24 @@ static size_t ip6_tnl_get_size(const str
+ nla_total_size(0) +
+ /* IFLA_IPTUN_FWMARK */
+ nla_total_size(4) +
++ /* IFLA_IPTUN_FMRS */
++ nla_total_size(0) +
++ (
++ /* nest */
++ nla_total_size(0) +
++ /* IFLA_IPTUN_FMR_IP6_PREFIX */
++ nla_total_size(sizeof(struct in6_addr)) +
++ /* IFLA_IPTUN_FMR_IP4_PREFIX */
++ nla_total_size(sizeof(struct in_addr)) +
++ /* IFLA_IPTUN_FMR_EA_LEN */
++ nla_total_size(1) +
++ /* IFLA_IPTUN_FMR_IP6_PREFIX_LEN */
++ nla_total_size(1) +
++ /* IFLA_IPTUN_FMR_IP4_PREFIX_LEN */
++ nla_total_size(1) +
++ /* IFLA_IPTUN_FMR_OFFSET */
++ nla_total_size(1)
++ ) * fmrs +
+ 0;
+ }
+
+@@ -2088,6 +2323,9 @@ static int ip6_tnl_fill_info(struct sk_b
+ {
+ struct ip6_tnl *tunnel = netdev_priv(dev);
+ struct __ip6_tnl_parm *parm = &tunnel->parms;
++ struct __ip6_tnl_fmr *c;
++ int fmrcnt = 0;
++ struct nlattr *fmrs;
+
+ if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
+ nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
+@@ -2097,9 +2335,27 @@ static int ip6_tnl_fill_info(struct sk_b
+ nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
+ nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
+ nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
+- nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
++ nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark) ||
++ !(fmrs = nla_nest_start(skb, IFLA_IPTUN_FMRS)))
+ goto nla_put_failure;
+
++ for (c = parm->fmrs; c; c = c->next) {
++ struct nlattr *fmr = nla_nest_start(skb, ++fmrcnt);
++ if (!fmr ||
++ nla_put(skb, IFLA_IPTUN_FMR_IP6_PREFIX,
++ sizeof(c->ip6_prefix), &c->ip6_prefix) ||
++ nla_put(skb, IFLA_IPTUN_FMR_IP4_PREFIX,
++ sizeof(c->ip4_prefix), &c->ip4_prefix) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_IP6_PREFIX_LEN, c->ip6_prefix_len) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_IP4_PREFIX_LEN, c->ip4_prefix_len) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_EA_LEN, c->ea_len) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_OFFSET, c->offset))
++ goto nla_put_failure;
++
++ nla_nest_end(skb, fmr);
++ }
++ nla_nest_end(skb, fmrs);
++
+ if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
+@@ -2139,6 +2395,7 @@ static const struct nla_policy ip6_tnl_p
+ [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
+ [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
++ [IFLA_IPTUN_FMRS] = { .type = NLA_NESTED },
+ };
+
+ static struct rtnl_link_ops ip6_link_ops __read_mostly = {
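
Most of ip4ip6_fmr_calc() above is about recovering the embedded-address (EA) bits: the IPv4 suffix beyond ip4_prefix_len plus a PSID taken from the transport port, where the port is laid out as offset bits, then the PSID, then padding. A right-aligned sketch of the PSID extraction (illustration only, not part of the patch): with offset 6 and an 8-bit PSID, port 0x04d2 yields PSID 0x34.

#include <stdint.h>

/* Extract the PSID from a port number laid out as
 * [ offset bits | psidlen PSID bits | padding ]. The patch keeps the PSID
 * left-aligned in a u16; this helper returns it right-aligned instead. */
static uint16_t map_psid_from_port(uint16_t port, unsigned int psidlen,
				   unsigned int offset)
{
	if (psidlen == 0 || psidlen + offset > 16)
		return 0;
	return (port >> (16 - offset - psidlen)) & ((1u << psidlen) - 1);
}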
diff --git a/target/linux/generic/pending-4.14/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch b/target/linux/generic/pending-4.14/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch
new file mode 100644
index 0000000..82b9ba5
--- /dev/null
+++ b/target/linux/generic/pending-4.14/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch
@@ -0,0 +1,247 @@
+From: Jonas Gorski <jogo@openwrt.org>
+Subject: ipv6: allow rejecting with "source address failed policy"
+
+RFC6204 L-14 requires rejecting traffic from invalid addresses with
+ICMPv6 Destination Unreachable, Code 5 (Source address failed ingress/
+egress policy) on the LAN side, so add an appropriate rule for that.
+
+Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+---
+ include/net/netns/ipv6.h | 1 +
+ include/uapi/linux/fib_rules.h | 4 +++
+ include/uapi/linux/rtnetlink.h | 1 +
+ net/ipv4/fib_semantics.c | 4 +++
+ net/ipv4/fib_trie.c | 1 +
+ net/ipv4/ipmr.c | 1 +
+ net/ipv6/fib6_rules.c | 4 +++
+ net/ipv6/ip6mr.c | 2 ++
+ net/ipv6/route.c | 58 +++++++++++++++++++++++++++++++++++++++++-
+ 9 files changed, 75 insertions(+), 1 deletion(-)
+
+--- a/include/net/netns/ipv6.h
++++ b/include/net/netns/ipv6.h
+@@ -69,6 +69,7 @@ struct netns_ipv6 {
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ bool fib6_has_custom_rules;
+ struct rt6_info *ip6_prohibit_entry;
++ struct rt6_info *ip6_policy_failed_entry;
+ struct rt6_info *ip6_blk_hole_entry;
+ struct fib6_table *fib6_local_tbl;
+ struct fib_rules_ops *fib6_rules_ops;
+--- a/include/uapi/linux/fib_rules.h
++++ b/include/uapi/linux/fib_rules.h
+@@ -73,6 +73,10 @@ enum {
+ FR_ACT_BLACKHOLE, /* Drop without notification */
+ FR_ACT_UNREACHABLE, /* Drop with ENETUNREACH */
+ FR_ACT_PROHIBIT, /* Drop with EACCES */
++ FR_ACT_RES9,
++ FR_ACT_RES10,
++ FR_ACT_RES11,
++ FR_ACT_POLICY_FAILED, /* Drop with EACCES */
+ __FR_ACT_MAX,
+ };
+
+--- a/include/uapi/linux/rtnetlink.h
++++ b/include/uapi/linux/rtnetlink.h
+@@ -221,6 +221,7 @@ enum {
+ RTN_THROW, /* Not in this table */
+ RTN_NAT, /* Translate this address */
+ RTN_XRESOLVE, /* Use external resolver */
++ RTN_POLICY_FAILED, /* Failed ingress/egress policy */
+ __RTN_MAX
+ };
+
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -139,6 +139,10 @@ const struct fib_prop fib_props[RTN_MAX
+ .error = -EINVAL,
+ .scope = RT_SCOPE_NOWHERE,
+ },
++ [RTN_POLICY_FAILED] = {
++ .error = -EACCES,
++ .scope = RT_SCOPE_UNIVERSE,
++ },
+ };
+
+ static void rt_fibinfo_free(struct rtable __rcu **rtp)
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -2460,6 +2460,7 @@ static const char *const rtn_type_names[
+ [RTN_THROW] = "THROW",
+ [RTN_NAT] = "NAT",
+ [RTN_XRESOLVE] = "XRESOLVE",
++ [RTN_POLICY_FAILED] = "POLICY_FAILED",
+ };
+
+ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -161,6 +161,7 @@ static int ipmr_rule_action(struct fib_r
+ case FR_ACT_UNREACHABLE:
+ return -ENETUNREACH;
+ case FR_ACT_PROHIBIT:
++ case FR_ACT_POLICY_FAILED:
+ return -EACCES;
+ case FR_ACT_BLACKHOLE:
+ default:
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -121,6 +121,10 @@ static int fib6_rule_action(struct fib_r
+ err = -EACCES;
+ rt = net->ipv6.ip6_prohibit_entry;
+ goto discard_pkt;
++ case FR_ACT_POLICY_FAILED:
++ err = -EACCES;
++ rt = net->ipv6.ip6_policy_failed_entry;
++ goto discard_pkt;
+ }
+
+ tb_id = fib_rule_get_table(rule, arg);
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -168,6 +168,8 @@ static int ip6mr_rule_action(struct fib_
+ return -ENETUNREACH;
+ case FR_ACT_PROHIBIT:
+ return -EACCES;
++ case FR_ACT_POLICY_FAILED:
++ return -EACCES;
+ case FR_ACT_BLACKHOLE:
+ default:
+ return -EINVAL;
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -91,6 +91,8 @@ static int ip6_pkt_discard(struct sk_bu
+ static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+ static int ip6_pkt_prohibit(struct sk_buff *skb);
+ static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
++static int ip6_pkt_policy_failed(struct sk_buff *skb);
++static int ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+ static void ip6_link_failure(struct sk_buff *skb);
+ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu);
+@@ -321,6 +323,21 @@ static const struct rt6_info ip6_prohibi
+ .rt6i_ref = ATOMIC_INIT(1),
+ };
+
++static const struct rt6_info ip6_policy_failed_entry_template = {
++ .dst = {
++ .__refcnt = ATOMIC_INIT(1),
++ .__use = 1,
++ .obsolete = DST_OBSOLETE_FORCE_CHK,
++ .error = -EACCES,
++ .input = ip6_pkt_policy_failed,
++ .output = ip6_pkt_policy_failed_out,
++ },
++ .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
++ .rt6i_protocol = RTPROT_KERNEL,
++ .rt6i_metric = ~(u32) 0,
++ .rt6i_ref = ATOMIC_INIT(1),
++};
++
+ static const struct rt6_info ip6_blk_hole_entry_template = {
+ .dst = {
+ .__refcnt = ATOMIC_INIT(1),
+@@ -2037,6 +2054,11 @@ static struct rt6_info *ip6_route_info_c
+ rt->dst.output = ip6_pkt_prohibit_out;
+ rt->dst.input = ip6_pkt_prohibit;
+ break;
++ case RTN_POLICY_FAILED:
++ rt->dst.error = -EACCES;
++ rt->dst.output = ip6_pkt_policy_failed_out;
++ rt->dst.input = ip6_pkt_policy_failed;
++ break;
+ case RTN_THROW:
+ case RTN_UNREACHABLE:
+ default:
+@@ -2762,6 +2784,17 @@ static int ip6_pkt_prohibit_out(struct n
+ return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
+ }
+
++static int ip6_pkt_policy_failed(struct sk_buff *skb)
++{
++ return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_INNOROUTES);
++}
++
++static int ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb)
++{
++ skb->dev = skb_dst(skb)->dev;
++ return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_OUTNOROUTES);
++}
++
+ /*
+ * Allocate a dst for local (unicast / anycast) address.
+ */
+@@ -2996,7 +3029,8 @@ static int rtm_to_fib6_config(struct sk_
+ if (rtm->rtm_type == RTN_UNREACHABLE ||
+ rtm->rtm_type == RTN_BLACKHOLE ||
+ rtm->rtm_type == RTN_PROHIBIT ||
+- rtm->rtm_type == RTN_THROW)
++ rtm->rtm_type == RTN_THROW ||
++ rtm->rtm_type == RTN_POLICY_FAILED)
+ cfg->fc_flags |= RTF_REJECT;
+
+ if (rtm->rtm_type == RTN_LOCAL)
+@@ -3486,6 +3520,9 @@ static int rt6_fill_node(struct net *net
+ case -EACCES:
+ rtm->rtm_type = RTN_PROHIBIT;
+ break;
++ case -EPERM:
++ rtm->rtm_type = RTN_POLICY_FAILED;
++ break;
+ case -EAGAIN:
+ rtm->rtm_type = RTN_THROW;
+ break;
+@@ -3801,6 +3838,8 @@ static int ip6_route_dev_notify(struct n
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ net->ipv6.ip6_prohibit_entry->dst.dev = dev;
+ net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
++ net->ipv6.ip6_policy_failed_entry->dst.dev = dev;
++ net->ipv6.ip6_policy_failed_entry->rt6i_idev = in6_dev_get(dev);
+ net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
+ net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
+ #endif
+@@ -4028,6 +4067,17 @@ static int __net_init ip6_route_net_init
+ net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+ dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
+ ip6_template_metrics, true);
++
++ net->ipv6.ip6_policy_failed_entry =
++ kmemdup(&ip6_policy_failed_entry_template,
++ sizeof(*net->ipv6.ip6_policy_failed_entry), GFP_KERNEL);
++ if (!net->ipv6.ip6_policy_failed_entry)
++ goto out_ip6_blk_hole_entry;
++ net->ipv6.ip6_policy_failed_entry->dst.path =
++ (struct dst_entry *)net->ipv6.ip6_policy_failed_entry;
++ net->ipv6.ip6_policy_failed_entry->dst.ops = &net->ipv6.ip6_dst_ops;
++ dst_init_metrics(&net->ipv6.ip6_policy_failed_entry->dst,
++ ip6_template_metrics, true);
+ #endif
+
+ net->ipv6.sysctl.flush_delay = 0;
+@@ -4046,6 +4096,8 @@ out:
+ return ret;
+
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
++out_ip6_blk_hole_entry:
++ kfree(net->ipv6.ip6_blk_hole_entry);
+ out_ip6_prohibit_entry:
+ kfree(net->ipv6.ip6_prohibit_entry);
+ out_ip6_null_entry:
+@@ -4063,6 +4115,7 @@ static void __net_exit ip6_route_net_exi
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ kfree(net->ipv6.ip6_prohibit_entry);
+ kfree(net->ipv6.ip6_blk_hole_entry);
++ kfree(net->ipv6.ip6_policy_failed_entry);
+ #endif
+ dst_entries_destroy(&net->ipv6.ip6_dst_ops);
+ }
+@@ -4136,6 +4189,9 @@ void __init ip6_route_init_special_entri
+ init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
++ init_net.ipv6.ip6_policy_failed_entry->dst.dev = init_net.loopback_dev;
++ init_net.ipv6.ip6_policy_failed_entry->rt6i_idev =
++ in6_dev_get(init_net.loopback_dev);
+ #endif
+ }
+
diff --git a/target/linux/generic/pending-4.14/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch b/target/linux/generic/pending-4.14/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch
new file mode 100644
index 0000000..cfea527
--- /dev/null
+++ b/target/linux/generic/pending-4.14/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch
@@ -0,0 +1,50 @@
+From: Jonas Gorski <jogo@openwrt.org>
+Subject: net: provide defines for _POLICY_FAILED until all code is updated
+
+Upstream introduced ICMPV6_POLICY_FAIL for code 5 of destination
+unreachable, conflicting with our name.
+
+Add appropriate defines to allow our code to build with the new
+name until we have updated our local patches for older kernels
+and userspace packages.
+
+Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+---
+ include/uapi/linux/fib_rules.h | 2 ++
+ include/uapi/linux/icmpv6.h | 2 ++
+ include/uapi/linux/rtnetlink.h | 2 ++
+ 3 files changed, 6 insertions(+)
+
+--- a/include/uapi/linux/fib_rules.h
++++ b/include/uapi/linux/fib_rules.h
+@@ -80,6 +80,8 @@ enum {
+ __FR_ACT_MAX,
+ };
+
++#define FR_ACT_FAILED_POLICY FR_ACT_POLICY_FAILED
++
+ #define FR_ACT_MAX (__FR_ACT_MAX - 1)
+
+ #endif
+--- a/include/uapi/linux/icmpv6.h
++++ b/include/uapi/linux/icmpv6.h
+@@ -119,6 +119,8 @@ struct icmp6hdr {
+ #define ICMPV6_POLICY_FAIL 5
+ #define ICMPV6_REJECT_ROUTE 6
+
++#define ICMPV6_FAILED_POLICY ICMPV6_POLICY_FAIL
++
+ /*
+ * Codes for Time Exceeded
+ */
+--- a/include/uapi/linux/rtnetlink.h
++++ b/include/uapi/linux/rtnetlink.h
+@@ -225,6 +225,8 @@ enum {
+ __RTN_MAX
+ };
+
++#define RTN_FAILED_POLICY RTN_POLICY_FAILED
++
+ #define RTN_MAX (__RTN_MAX - 1)
+
+
diff --git a/target/linux/generic/pending-4.14/680-NET-skip-GRO-for-foreign-MAC-addresses.patch b/target/linux/generic/pending-4.14/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
new file mode 100644
index 0000000..4467975
--- /dev/null
+++ b/target/linux/generic/pending-4.14/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
@@ -0,0 +1,145 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: net: replace GRO optimization patch with a new one that supports VLANs/bridges with different MAC addresses
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/linux/netdevice.h | 2 ++
+ include/linux/skbuff.h | 3 ++-
+ net/core/dev.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++
+ net/ethernet/eth.c | 18 +++++++++++++++++-
+ 4 files changed, 69 insertions(+), 2 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1739,6 +1739,8 @@ struct net_device {
+ struct netdev_hw_addr_list mc;
+ struct netdev_hw_addr_list dev_addrs;
+
++ unsigned char local_addr_mask[MAX_ADDR_LEN];
++
+ #ifdef CONFIG_SYSFS
+ struct kset *queues_kset;
+ #endif
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -778,6 +778,7 @@ struct sk_buff {
+ __u8 tc_redirected:1;
+ __u8 tc_from_ingress:1;
+ #endif
++ __u8 gro_skip:1;
+
+ #ifdef CONFIG_NET_SCHED
+ __u16 tc_index; /* traffic control index */
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4756,6 +4756,9 @@ static enum gro_result dev_gro_receive(s
+ enum gro_result ret;
+ int grow;
+
++ if (skb->gro_skip)
++ goto normal;
++
+ if (netif_elide_gro(skb->dev))
+ goto normal;
+
+@@ -6226,6 +6229,48 @@ static void __netdev_adjacent_dev_unlink
+ &upper_dev->adj_list.lower);
+ }
+
++static void __netdev_addr_mask(unsigned char *mask, const unsigned char *addr,
++ struct net_device *dev)
++{
++ int i;
++
++ for (i = 0; i < dev->addr_len; i++)
++ mask[i] |= addr[i] ^ dev->dev_addr[i];
++}
++
++static void __netdev_upper_mask(unsigned char *mask, struct net_device *dev,
++ struct net_device *lower)
++{
++ struct net_device *cur;
++ struct list_head *iter;
++
++ netdev_for_each_upper_dev_rcu(dev, cur, iter) {
++ __netdev_addr_mask(mask, cur->dev_addr, lower);
++ __netdev_upper_mask(mask, cur, lower);
++ }
++}
++
++static void __netdev_update_addr_mask(struct net_device *dev)
++{
++ unsigned char mask[MAX_ADDR_LEN];
++ struct net_device *cur;
++ struct list_head *iter;
++
++ memset(mask, 0, sizeof(mask));
++ __netdev_upper_mask(mask, dev, dev);
++ memcpy(dev->local_addr_mask, mask, dev->addr_len);
++
++ netdev_for_each_lower_dev(dev, cur, iter)
++ __netdev_update_addr_mask(cur);
++}
++
++static void netdev_update_addr_mask(struct net_device *dev)
++{
++ rcu_read_lock();
++ __netdev_update_addr_mask(dev);
++ rcu_read_unlock();
++}
++
+ static int __netdev_upper_dev_link(struct net_device *dev,
+ struct net_device *upper_dev, bool master,
+ void *upper_priv, void *upper_info)
+@@ -6341,6 +6386,8 @@ void netdev_upper_dev_unlink(struct net_
+
+ __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
+
++ netdev_update_addr_mask(dev);
++ netdev_update_addr_mask(dev);
+ call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+ &changeupper_info.info);
+ }
+@@ -6911,6 +6958,7 @@ int dev_set_mac_address(struct net_devic
+ if (err)
+ return err;
+ dev->addr_assign_type = NET_ADDR_SET;
++ netdev_update_addr_mask(dev);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
+ return 0;
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -144,6 +144,18 @@ u32 eth_get_headlen(void *data, unsigned
+ }
+ EXPORT_SYMBOL(eth_get_headlen);
+
++static inline bool
++eth_check_local_mask(const void *addr1, const void *addr2, const void *mask)
++{
++ const u16 *a1 = addr1;
++ const u16 *a2 = addr2;
++ const u16 *m = mask;
++
++ return (((a1[0] ^ a2[0]) & ~m[0]) |
++ ((a1[1] ^ a2[1]) & ~m[1]) |
++ ((a1[2] ^ a2[2]) & ~m[2]));
++}
++
+ /**
+ * eth_type_trans - determine the packet's protocol ID.
+ * @skb: received socket data
+@@ -172,8 +184,12 @@ __be16 eth_type_trans(struct sk_buff *sk
+ skb->pkt_type = PACKET_MULTICAST;
+ }
+ else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+- dev->dev_addr)))
++ dev->dev_addr))) {
+ skb->pkt_type = PACKET_OTHERHOST;
++ if (eth_check_local_mask(eth->h_dest, dev->dev_addr,
++ dev->local_addr_mask))
++ skb->gro_skip = 1;
++ }
+
+ /*
+ * Some variants of DSA tagging don't have an ethertype field
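
The idea behind local_addr_mask is that each byte accumulates the bits in which any upper device's MAC differs from this device's own address; eth_type_trans() then only sets gro_skip for destinations that differ outside that mask. For example, with a port MAC of 00:11:22:33:44:55 under a bridge using 00:11:22:33:44:57, the mask ends up as 00:00:00:00:00:02, so frames to the bridge MAC still get GRO while genuinely foreign destinations do not. A standalone restatement of the check (not part of the patch, MAC values purely illustrative):

#include <stdbool.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Same test as eth_check_local_mask() above: true means the destination
 * differs from the device address outside the accumulated mask, so the
 * frame gets skb->gro_skip set. */
static bool dest_is_foreign(const uint8_t *dest, const uint8_t *dev_addr,
			    const uint8_t *mask)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		if ((dest[i] ^ dev_addr[i]) & ~mask[i])
			return true;
	return false;
}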
diff --git a/target/linux/generic/pending-4.14/681-NET-add-of_get_mac_address_mtd.patch b/target/linux/generic/pending-4.14/681-NET-add-of_get_mac_address_mtd.patch
new file mode 100644
index 0000000..b29b5f1
--- /dev/null
+++ b/target/linux/generic/pending-4.14/681-NET-add-of_get_mac_address_mtd.patch
@@ -0,0 +1,127 @@
+From: John Crispin <blogic@openwrt.org>
+Subject: NET: add mtd-mac-address support to of_get_mac_address()
+
+Many embedded devices have information such as mac addresses stored inside mtd
+devices. This patch allows us to add a property inside a node describing a
+network interface. The new property points at an mtd partition with an offset
+where the mac address can be found.
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/of/of_net.c | 37 +++++++++++++++++++++++++++++++++++++
+ include/linux/of_net.h | 1 +
+ 2 files changed, 38 insertions(+)
+
+--- a/drivers/of/of_net.c
++++ b/drivers/of/of_net.c
+@@ -10,6 +10,7 @@
+ #include <linux/of_net.h>
+ #include <linux/phy.h>
+ #include <linux/export.h>
++#include <linux/mtd/mtd.h>
+
+ /**
+ * of_get_phy_mode - Get phy mode for given device_node
+@@ -38,7 +39,7 @@ int of_get_phy_mode(struct device_node *
+ }
+ EXPORT_SYMBOL_GPL(of_get_phy_mode);
+
+-static const void *of_get_mac_addr(struct device_node *np, const char *name)
++static void *of_get_mac_addr(struct device_node *np, const char *name)
+ {
+ struct property *pp = of_find_property(np, name, NULL);
+
+@@ -47,6 +48,73 @@ static const void *of_get_mac_addr(struc
+ return NULL;
+ }
+
++static const void *of_get_mac_address_mtd(struct device_node *np)
++{
++#ifdef CONFIG_MTD
++ struct device_node *mtd_np = NULL;
++ struct property *prop;
++ size_t retlen;
++ int size, ret;
++ struct mtd_info *mtd;
++ const char *part;
++ const __be32 *list;
++ phandle phandle;
++ u32 mac_inc = 0;
++ u8 mac[ETH_ALEN];
++ void *addr;
++
++ list = of_get_property(np, "mtd-mac-address", &size);
++ if (!list || (size != (2 * sizeof(*list))))
++ return NULL;
++
++ phandle = be32_to_cpup(list++);
++ if (phandle)
++ mtd_np = of_find_node_by_phandle(phandle);
++
++ if (!mtd_np)
++ return NULL;
++
++ part = of_get_property(mtd_np, "label", NULL);
++ if (!part)
++ part = mtd_np->name;
++
++ mtd = get_mtd_device_nm(part);
++ if (IS_ERR(mtd))
++ return NULL;
++
++ ret = mtd_read(mtd, be32_to_cpup(list), 6, &retlen, mac);
++ put_mtd_device(mtd);
++
++ if (!of_property_read_u32(np, "mtd-mac-address-increment", &mac_inc))
++ mac[5] += mac_inc;
++
++ if (!is_valid_ether_addr(mac))
++ return NULL;
++
++ addr = of_get_mac_addr(np, "mac-address");
++ if (addr) {
++ memcpy(addr, mac, ETH_ALEN);
++ return addr;
++ }
++
++ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
++ if (!prop)
++ return NULL;
++
++ prop->name = "mac-address";
++ prop->length = ETH_ALEN;
++ prop->value = kmemdup(mac, ETH_ALEN, GFP_KERNEL);
++ if (!prop->value || of_add_property(np, prop))
++ goto free;
++
++ return prop->value;
++free:
++ kfree(prop->value);
++ kfree(prop);
++#endif
++ return NULL;
++}
++
+ /**
+ * Search the device tree for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain to "most recent" MAC
+@@ -64,11 +132,18 @@ static const void *of_get_mac_addr(struc
+ * addresses. Some older U-Boots only initialized 'local-mac-address'. In
+ * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
+ * but is all zeros.
++ *
++ * If an 'mtd-mac-address' property exists, try to fetch the MAC address from
++ * the specified MTD device and store it as a 'mac-address' property.
+ */
+ const void *of_get_mac_address(struct device_node *np)
+ {
+ const void *addr;
+
++ addr = of_get_mac_address_mtd(np);
++ if (addr)
++ return addr;
++
+ addr = of_get_mac_addr(np, "mac-address");
+ if (addr)
+ return addr;
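Since the MTD-backed address is returned through the existing of_get_mac_address() call (and cached as a 'mac-address' property on first use), drivers need no new code. A hypothetical consumer, for illustration only:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>

/* hypothetical probe helper: use the DT- or MTD-provided MAC, or fall back */
static void mydrv_init_mac(struct platform_device *pdev,
                           struct net_device *ndev)
{
        const void *mac = of_get_mac_address(pdev->dev.of_node);

        if (mac)
                ether_addr_copy(ndev->dev_addr, mac);
        else
                eth_hw_addr_random(ndev);
}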
diff --git a/target/linux/generic/pending-4.14/701-phy_extension.patch b/target/linux/generic/pending-4.14/701-phy_extension.patch
new file mode 100644
index 0000000..1b380e2
--- /dev/null
+++ b/target/linux/generic/pending-4.14/701-phy_extension.patch
@@ -0,0 +1,95 @@
+From: John Crispin <john@phrozen.org>
+Subject: net: phy: add phy_ethtool_ioctl()
+
+Signed-off-by: John Crispin <john@phrozen.org>
+---
+ drivers/net/phy/phy.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/phy.h | 1 +
+ 2 files changed, 45 insertions(+)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -382,6 +382,73 @@ void phy_ethtool_ksettings_get(struct ph
+ }
+ EXPORT_SYMBOL(phy_ethtool_ksettings_get);
+
++static int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
++{
++ cmd->supported = phydev->supported;
++
++ cmd->advertising = phydev->advertising;
++ cmd->lp_advertising = phydev->lp_advertising;
++
++ ethtool_cmd_speed_set(cmd, phydev->speed);
++ cmd->duplex = phydev->duplex;
++ if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
++ cmd->port = PORT_BNC;
++ else
++ cmd->port = PORT_MII;
++ cmd->phy_address = phydev->mdio.addr;
++ cmd->transceiver = phy_is_internal(phydev) ?
++ XCVR_INTERNAL : XCVR_EXTERNAL;
++ cmd->autoneg = phydev->autoneg;
++ cmd->eth_tp_mdix_ctrl = phydev->mdix_ctrl;
++ cmd->eth_tp_mdix = phydev->mdix;
++
++ return 0;
++}
++
++int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr)
++{
++ u32 cmd;
++ int tmp;
++ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
++ struct ethtool_value edata = { ETHTOOL_GLINK };
++
++ if (get_user(cmd, (u32 *) useraddr))
++ return -EFAULT;
++
++ switch (cmd) {
++ case ETHTOOL_GSET:
++ phy_ethtool_gset(phydev, &ecmd);
++ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
++ return -EFAULT;
++ return 0;
++
++ case ETHTOOL_SSET:
++ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
++ return -EFAULT;
++ return phy_ethtool_sset(phydev, &ecmd);
++
++ case ETHTOOL_NWAY_RST:
++ /* if autoneg is off, it's an error */
++ tmp = phy_read(phydev, MII_BMCR);
++ if (tmp & BMCR_ANENABLE) {
++ tmp |= (BMCR_ANRESTART);
++ phy_write(phydev, MII_BMCR, tmp);
++ return 0;
++ }
++ return -EINVAL;
++
++ case ETHTOOL_GLINK:
++ edata.data = (phy_read(phydev,
++ MII_BMSR) & BMSR_LSTATUS) ? 1 : 0;
++ if (copy_to_user(useraddr, &edata, sizeof(edata)))
++ return -EFAULT;
++ return 0;
++ }
++
++ return -EOPNOTSUPP;
++}
++EXPORT_SYMBOL(phy_ethtool_ioctl);
++
+ /**
+ * phy_mii_ioctl - generic PHY MII ioctl interface
+ * @phydev: the phy_device struct
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -905,6 +905,7 @@ void phy_ethtool_ksettings_get(struct ph
+ struct ethtool_link_ksettings *cmd);
+ int phy_ethtool_ksettings_set(struct phy_device *phydev,
+ const struct ethtool_link_ksettings *cmd);
++int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr);
+ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
+ int phy_start_interrupts(struct phy_device *phydev);
+ void phy_print_status(struct phy_device *phydev);
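A sketch of the calling convention (the wrapper and its name are hypothetical): useraddr must point at user memory whose first u32 selects the ethtool sub-command; the helper performs the user-space copies itself and handles ETHTOOL_GSET, ETHTOOL_SSET, ETHTOOL_GLINK and ETHTOOL_NWAY_RST, returning 0, -EFAULT, -EINVAL or -EOPNOTSUPP:

#include <linux/errno.h>
#include <linux/phy.h>

/* hypothetical wrapper around the newly exported helper */
static int mydrv_phy_ethtool(struct phy_device *phydev, void *useraddr)
{
        if (!phydev || !phydev->drv)
                return -ENODEV;

        return phy_ethtool_ioctl(phydev, useraddr);
}

ETHTOOL_SSET is routed through phy_ethtool_sset(), so speed, duplex and autoneg changes still go through the normal PHY state machine.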
diff --git a/target/linux/generic/pending-4.14/703-phy-add-detach-callback-to-struct-phy_driver.patch b/target/linux/generic/pending-4.14/703-phy-add-detach-callback-to-struct-phy_driver.patch
new file mode 100644
index 0000000..3d5f8fc
--- /dev/null
+++ b/target/linux/generic/pending-4.14/703-phy-add-detach-callback-to-struct-phy_driver.patch
@@ -0,0 +1,38 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: generic: add detach callback to struct phy_driver
+
+lede-commit: fe61fc2d7d0b3fb348b502f68f98243b3ddf5867
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/net/phy/phy_device.c | 3 +++
+ include/linux/phy.h | 6 ++++++
+ 2 files changed, 9 insertions(+)
+
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1095,6 +1095,9 @@ void phy_detach(struct phy_device *phyde
+ struct module *ndev_owner = dev->dev.parent->driver->owner;
+ struct mii_bus *bus;
+
++ if (phydev->drv && phydev->drv->detach)
++ phydev->drv->detach(phydev);
++
+ if (phydev->sysfs_links) {
+ sysfs_remove_link(&dev->dev.kobj, "phydev");
+ sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -559,6 +559,12 @@ struct phy_driver {
+ */
+ int (*did_interrupt)(struct phy_device *phydev);
+
++ /*
++ * Called before an ethernet device is detached
++ * from the PHY.
++ */
++ void (*detach)(struct phy_device *phydev);
++
+ /* Clears up any memory if needed */
+ void (*remove)(struct phy_device *phydev);
+
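A sketch of how a PHY driver could use the new hook (IDs and names are hypothetical): the callback runs at the top of phy_detach(), before the sysfs links are torn down, so a driver can undo state that is only valid while a net device is attached:

#include <linux/module.h>
#include <linux/phy.h>

static void example_phy_detach(struct phy_device *phydev)
{
        /* e.g. drop a board-specific fixup applied in config_init() */
}

static struct phy_driver example_phy_drivers[] = {
        {
                .phy_id         = 0x004dd000,   /* hypothetical ID */
                .phy_id_mask    = 0xfffffff0,
                .name           = "Example Gigabit PHY",
                .features       = PHY_GBIT_FEATURES,
                .config_init    = genphy_config_init,
                .config_aneg    = genphy_config_aneg,
                .read_status    = genphy_read_status,
                .detach         = example_phy_detach,
        },
};
module_phy_driver(example_phy_drivers);

MODULE_LICENSE("GPL");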
diff --git a/target/linux/generic/pending-4.14/734-net-phy-at803x-allow-to-configure-via-pdata.patch b/target/linux/generic/pending-4.14/734-net-phy-at803x-allow-to-configure-via-pdata.patch
new file mode 100644
index 0000000..19dea17
--- /dev/null
+++ b/target/linux/generic/pending-4.14/734-net-phy-at803x-allow-to-configure-via-pdata.patch
@@ -0,0 +1,142 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: net: phy: allow to configure AR803x PHYs via platform data
+
+Add a patch for the at803x PHY driver that makes it possible to
+configure some register settings via platform data.
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/net/phy/at803x.c | 56 ++++++++++++++++++++++++++++++++
+ include/linux/platform_data/phy-at803x.h | 11 +++++++
+ 2 files changed, 67 insertions(+)
+ create mode 100644 include/linux/platform_data/phy-at803x.h
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -12,12 +12,14 @@
+ */
+
+ #include <linux/phy.h>
++#include <linux/mdio.h>
+ #include <linux/module.h>
+ #include <linux/string.h>
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/of_gpio.h>
+ #include <linux/gpio/consumer.h>
++#include <linux/platform_data/phy-at803x.h>
+
+ #define AT803X_INTR_ENABLE 0x12
+ #define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
+@@ -45,6 +47,11 @@
+ #define AT803X_REG_CHIP_CONFIG 0x1f
+ #define AT803X_BT_BX_REG_SEL 0x8000
+
++#define AT803X_PCS_SMART_EEE_CTRL3 0x805D
++#define AT803X_SMART_EEE_CTRL3_LPI_TX_DELAY_SEL_MASK 0x3
++#define AT803X_SMART_EEE_CTRL3_LPI_TX_DELAY_SEL_SHIFT 12
++#define AT803X_SMART_EEE_CTRL3_LPI_EN BIT(8)
++
+ #define AT803X_DEBUG_ADDR 0x1D
+ #define AT803X_DEBUG_DATA 0x1E
+
+@@ -73,6 +80,7 @@ MODULE_LICENSE("GPL");
+ struct at803x_priv {
+ bool phy_reset:1;
+ struct gpio_desc *gpiod_reset;
++ int prev_speed;
+ };
+
+ struct at803x_context {
+@@ -277,8 +285,16 @@ does_not_require_reset_workaround:
+ return 0;
+ }
+
++static void at803x_disable_smarteee(struct phy_device *phydev)
++{
++ phy_write_mmd(phydev, MDIO_MMD_PCS, AT803X_PCS_SMART_EEE_CTRL3,
++ 1 << AT803X_SMART_EEE_CTRL3_LPI_TX_DELAY_SEL_SHIFT);
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
++}
++
+ static int at803x_config_init(struct phy_device *phydev)
+ {
++ struct at803x_platform_data *pdata;
+ int ret;
+
+ ret = genphy_config_init(phydev);
+@@ -299,6 +315,26 @@ static int at803x_config_init(struct phy
+ return ret;
+ }
+
++ pdata = dev_get_platdata(&phydev->mdio.dev);
++ if (pdata) {
++ if (pdata->disable_smarteee)
++ at803x_disable_smarteee(phydev);
++
++ if (pdata->enable_rgmii_rx_delay)
++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0,
++ AT803X_DEBUG_RX_CLK_DLY_EN);
++ else
++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0,
++ AT803X_DEBUG_RX_CLK_DLY_EN, 0);
++
++ if (pdata->enable_rgmii_tx_delay)
++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0,
++ AT803X_DEBUG_TX_CLK_DLY_EN);
++ else
++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5,
++ AT803X_DEBUG_TX_CLK_DLY_EN, 0);
++ }
++
+ return 0;
+ }
+
+@@ -336,6 +372,8 @@ static int at803x_config_intr(struct phy
+ static void at803x_link_change_notify(struct phy_device *phydev)
+ {
+ struct at803x_priv *priv = phydev->priv;
++ struct at803x_platform_data *pdata;
++ pdata = dev_get_platdata(&phydev->mdio.dev);
+
+ /*
+ * Conduct a hardware reset for AT8030/2 every time a link loss is
+@@ -364,6 +402,24 @@ static void at803x_link_change_notify(st
+ } else {
+ priv->phy_reset = false;
+ }
++ if (pdata && pdata->fixup_rgmii_tx_delay &&
++ phydev->speed != priv->prev_speed) {
++ switch (phydev->speed) {
++ case SPEED_10:
++ case SPEED_100:
++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0,
++ AT803X_DEBUG_TX_CLK_DLY_EN);
++ break;
++ case SPEED_1000:
++ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5,
++ AT803X_DEBUG_TX_CLK_DLY_EN, 0);
++ break;
++ default:
++ break;
++ }
++
++ priv->prev_speed = phydev->speed;
++ }
+ }
+
+ static int at803x_aneg_done(struct phy_device *phydev)
+--- /dev/null
++++ b/include/linux/platform_data/phy-at803x.h
+@@ -0,0 +1,11 @@
++#ifndef _PHY_AT803X_PDATA_H
++#define _PHY_AT803X_PDATA_H
++
++struct at803x_platform_data {
++ int disable_smarteee:1;
++ int enable_rgmii_tx_delay:1;
++ int enable_rgmii_rx_delay:1;
++ int fixup_rgmii_tx_delay:1;
++};
++
++#endif /* _PHY_AT803X_PDATA_H */
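A sketch of the board-support side (names are hypothetical): whichever mechanism the platform uses, the structure just has to end up as the platform_data of the PHY's underlying MDIO device, because the driver fetches it with dev_get_platdata(&phydev->mdio.dev) in config_init() and link_change_notify():

#include <linux/phy.h>
#include <linux/platform_data/phy-at803x.h>

static struct at803x_platform_data board_ar8033_pdata = {
        .disable_smarteee       = 1,    /* avoid SmartEEE-related link drops */
        .enable_rgmii_rx_delay  = 1,
        .enable_rgmii_tx_delay  = 0,
};

/* called from board/ethernet setup code before the PHY driver binds */
static void board_attach_ar8033_pdata(struct phy_device *phydev)
{
        phydev->mdio.dev.platform_data = &board_ar8033_pdata;
}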
diff --git a/target/linux/generic/pending-4.14/735-net-phy-at803x-fix-at8033-sgmii-mode.patch b/target/linux/generic/pending-4.14/735-net-phy-at803x-fix-at8033-sgmii-mode.patch
new file mode 100644
index 0000000..3c68662
--- /dev/null
+++ b/target/linux/generic/pending-4.14/735-net-phy-at803x-fix-at8033-sgmii-mode.patch
@@ -0,0 +1,54 @@
+From: Roman Yeryomin <roman@advem.lv>
+Subject: kernel: add at803x fix for sgmii mode
+
+Some (possibly broken) bootloaders incorrectly initialize the AT8033
+PHY. This patch enables SGMII autonegotiation mode.
+
+[john@phrozen.org: felix added this to his upstream queue]
+
+Signed-off-by: Roman Yeryomin <roman@advem.lv>
+---
+ drivers/net/phy/at803x.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -55,6 +55,10 @@
+ #define AT803X_DEBUG_ADDR 0x1D
+ #define AT803X_DEBUG_DATA 0x1E
+
++#define AT803X_REG_CHIP_CONFIG 0x1f
++#define AT803X_BT_BX_REG_SEL 0x8000
++#define AT803X_SGMII_ANEG_EN 0x1000
++
+ #define AT803X_MODE_CFG_MASK 0x0F
+ #define AT803X_MODE_CFG_SGMII 0x01
+
+@@ -296,6 +300,27 @@ static int at803x_config_init(struct phy
+ {
+ struct at803x_platform_data *pdata;
+ int ret;
++ u32 v;
++
++ if (phydev->drv->phy_id == ATH8031_PHY_ID &&
++ phydev->interface == PHY_INTERFACE_MODE_SGMII)
++ {
++ v = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
++ /* select SGMII/fiber page */
++ ret = phy_write(phydev, AT803X_REG_CHIP_CONFIG,
++ v & ~AT803X_BT_BX_REG_SEL);
++ if (ret)
++ return ret;
++ /* enable SGMII autonegotiation */
++ ret = phy_write(phydev, MII_BMCR, AT803X_SGMII_ANEG_EN);
++ if (ret)
++ return ret;
++ /* select copper page */
++ ret = phy_write(phydev, AT803X_REG_CHIP_CONFIG,
++ v | AT803X_BT_BX_REG_SEL);
++ if (ret)
++ return ret;
++ }
+
+ ret = genphy_config_init(phydev);
+ if (ret < 0)
diff --git a/target/linux/generic/pending-4.14/810-pci_disable_common_quirks.patch b/target/linux/generic/pending-4.14/810-pci_disable_common_quirks.patch
new file mode 100644
index 0000000..2a5bcc8
--- /dev/null
+++ b/target/linux/generic/pending-4.14/810-pci_disable_common_quirks.patch
@@ -0,0 +1,60 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: debloat: add kernel config option for disabling common PCI quirks
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/pci/Kconfig | 6 ++++++
+ drivers/pci/quirks.c | 6 ++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -71,6 +71,12 @@ config XEN_PCIDEV_FRONTEND
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
+
++config PCI_DISABLE_COMMON_QUIRKS
++ bool "PCI disable common quirks"
++ depends on PCI
++ help
++ If you don't know what to do here, say N.
++
+ config HT_IRQ
+ bool "Interrupts on hypertransport devices"
+ default y
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -43,6 +43,7 @@ static void quirk_mmio_always_on(struct
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+ /* The Mellanox Tavor device gives false positive parity errors
+ * Mark this device with a broken_parity_status, to allow
+ * PCI scanning code to "skip" this now blacklisted device.
+@@ -3077,6 +3078,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
+
++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
+
+ /*
+ * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum. To
+@@ -3133,6 +3135,8 @@ static void fixup_debug_report(struct pc
+ }
+ }
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
++
+ /*
+ * Some BIOS implementations leave the Intel GPU interrupts enabled,
+ * even though no one is handling them (f.e. i915 driver is never loaded).
+@@ -3167,6 +3171,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IN
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
+
++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
++
+ /*
+ * PCI devices which are on Intel chips can skip the 10ms delay
+ * before entering D3 mode.
diff --git a/target/linux/generic/pending-4.14/811-pci_disable_usb_common_quirks.patch b/target/linux/generic/pending-4.14/811-pci_disable_usb_common_quirks.patch
new file mode 100644
index 0000000..005c25d
--- /dev/null
+++ b/target/linux/generic/pending-4.14/811-pci_disable_usb_common_quirks.patch
@@ -0,0 +1,113 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: debloat: disable common USB quirks
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/usb/host/pci-quirks.c | 16 ++++++++++++++++
+ drivers/usb/host/pci-quirks.h | 18 +++++++++++++++++-
+ include/linux/usb/hcd.h | 7 +++++++
+ 3 files changed, 40 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -107,6 +107,8 @@ struct amd_chipset_type {
+ u8 rev;
+ };
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
++
+ static struct amd_chipset_info {
+ struct pci_dev *nb_dev;
+ struct pci_dev *smbus_dev;
+@@ -511,6 +513,10 @@ void usb_amd_dev_put(void)
+ }
+ EXPORT_SYMBOL_GPL(usb_amd_dev_put);
+
++#endif /* CONFIG_PCI_DISABLE_COMMON_QUIRKS */
++
++#if IS_ENABLED(CONFIG_USB_UHCI_HCD)
++
+ /*
+ * Make sure the controller is completely inactive, unable to
+ * generate interrupts or do DMA.
+@@ -590,8 +596,17 @@ reset_needed:
+ uhci_reset_hc(pdev, base);
+ return 1;
+ }
++#else
++int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
++{
++ return 0;
++}
++
++#endif
+ EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
++
+ static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
+ {
+ u16 cmd;
+@@ -1158,6 +1173,7 @@ static void quirk_usb_early_handoff(stru
+ }
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
++#endif
+
+ bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
+ {
+--- a/drivers/usb/host/pci-quirks.h
++++ b/drivers/usb/host/pci-quirks.h
+@@ -5,6 +5,9 @@
+ #ifdef CONFIG_USB_PCI
+ void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
+ int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
++#endif /* CONFIG_USB_PCI */
++
++#if defined(CONFIG_USB_PCI) && !defined(CONFIG_PCI_DISABLE_COMMON_QUIRKS)
+ int usb_amd_find_chipset_info(void);
+ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
+ bool usb_amd_hang_symptom_quirk(void);
+@@ -19,12 +22,25 @@ void sb800_prefetch(struct device *dev,
+ bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
+ #else
+ struct pci_dev;
++static inline int usb_amd_find_chipset_info(void)
++{
++ return 0;
++}
++static inline bool usb_amd_hang_symptom_quirk(void)
++{
++ return false;
++}
++static inline bool usb_amd_prefetch_quirk(void)
++{
++ return false;
++}
+ static inline void usb_amd_quirk_pll_disable(void) {}
+ static inline void usb_amd_quirk_pll_enable(void) {}
+ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
+ static inline void usb_amd_dev_put(void) {}
+ static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
+ static inline void sb800_prefetch(struct device *dev, int on) {}
++static inline void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev) {}
+ #endif /* CONFIG_USB_PCI */
+
+ #endif /* __LINUX_USB_PCI_QUIRKS_H */
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -465,7 +465,14 @@ extern int usb_hcd_pci_probe(struct pci_
+ extern void usb_hcd_pci_remove(struct pci_dev *dev);
+ extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+ extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
++#else
++static inline int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev)
++{
++ return 0;
++}
++#endif
+
+ #ifdef CONFIG_PM
+ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
diff --git a/target/linux/generic/pending-4.14/821-usb-Remove-annoying-warning-about-bogus-URB.patch b/target/linux/generic/pending-4.14/821-usb-Remove-annoying-warning-about-bogus-URB.patch
new file mode 100644
index 0000000..c14e679
--- /dev/null
+++ b/target/linux/generic/pending-4.14/821-usb-Remove-annoying-warning-about-bogus-URB.patch
@@ -0,0 +1,76 @@
+From: Alexey Brodkin <abrodkin@synopsys.com>
+Subject: usb: Remove annoying warning about bogus URB
+
+When an ath9k-htc Wi-Fi dongle is used with a generic OHCI controller,
+an infinite stream of warnings appears in the debug console:
+-------------------------->8----------------------
+usb 1-1: new full-speed USB device number 2 using ohci-platform
+usb 1-1: ath9k_htc: Firmware ath9k_htc/htc_9271-1.4.0.fw requested
+usb 1-1: ath9k_htc: Transferred FW: ath9k_htc/htc_9271-1.4.0.fw, size:
+51008
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 19 at drivers/usb/core/urb.c:449
+usb_submit_urb+0x1b4/0x498()
+usb 1-1: BOGUS urb xfer, pipe 1 != type 3
+Modules linked in:
+CPU: 0 PID: 19 Comm: kworker/0:1 Not tainted
+4.4.0-rc4-00017-g00e2d79-dirty #3
+Workqueue: events request_firmware_work_func
+
+Stack Trace:
+ arc_unwind_core.constprop.1+0xa4/0x110
+---[ end trace 649ef8c342817fc2 ]---
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 19 at drivers/usb/core/urb.c:449
+usb_submit_urb+0x1b4/0x498()
+usb 1-1: BOGUS urb xfer, pipe 1 != type 3
+Modules linked in:
+CPU: 0 PID: 19 Comm: kworker/0:1 Tainted: G W
+4.4.0-rc4-00017-g00e2d79-dirty #3
+Workqueue: events request_firmware_work_func
+
+Stack Trace:
+ arc_unwind_core.constprop.1+0xa4/0x110
+---[ end trace 649ef8c342817fc3 ]---
+------------[ cut here ]------------
+-------------------------->8----------------------
+
+There are discussions on the mailing lists proposing to disable that
+particular check altogether, and with the warning muted everything
+seems to work fine.
+
+A newer thread on the subject can be found here:
+http://lists.infradead.org/pipermail/linux-snps-arc/2016-July/001310.html
+
+Let's see what comes out of that discussion; hopefully patching
+generic USB code won't be required then.
+
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+---
+ drivers/usb/core/urb.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/drivers/usb/core/urb.c
++++ b/drivers/usb/core/urb.c
+@@ -326,9 +326,6 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
+ */
+ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
+ {
+- static int pipetypes[4] = {
+- PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
+- };
+ int xfertype, max;
+ struct usb_device *dev;
+ struct usb_host_endpoint *ep;
+@@ -443,11 +440,6 @@ int usb_submit_urb(struct urb *urb, gfp_
+ * cause problems in HCDs if they get it wrong.
+ */
+
+- /* Check that the pipe's type matches the endpoint's type */
+- if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
+- dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
+- usb_pipetype(urb->pipe), pipetypes[xfertype]);
+-
+ /* Check against a simple/standard policy */
+ allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
+ URB_FREE_BUFFER);
diff --git a/target/linux/generic/pending-4.14/831-ledtrig_netdev.patch b/target/linux/generic/pending-4.14/831-ledtrig_netdev.patch
new file mode 100644
index 0000000..a12e6db
--- /dev/null
+++ b/target/linux/generic/pending-4.14/831-ledtrig_netdev.patch
@@ -0,0 +1,31 @@
+Subject: leds: add Kconfig and Makefile entries for the netdev trigger
+
+Wire the (out-of-tree) ledtrig-netdev driver into the LED subsystem
+build and add a LEDS_TRIGGER_NETDEV Kconfig option, so LEDs can be
+driven by network device activity.
+---
+ drivers/leds/Makefile | 1 +
+ drivers/leds/trigger/Kconfig | 7 +++++++
+ 2 files changed, 8 insertions(+)
+
+--- a/drivers/leds/Makefile
++++ b/drivers/leds/Makefile
+@@ -82,3 +82,4 @@ obj-$(CONFIG_LEDS_USER) += uleds.o
+
+ # LED Triggers
+ obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
++obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
+--- a/drivers/leds/trigger/Kconfig
++++ b/drivers/leds/trigger/Kconfig
+@@ -126,4 +126,11 @@ config LEDS_TRIGGER_PANIC
+ a different trigger.
+ If unsure, say Y.
+
++config LEDS_TRIGGER_NETDEV
++ tristate "LED Netdev Trigger"
++ depends on NET && LEDS_TRIGGERS
++ help
++ This allows LEDs to be controlled by network device activity.
++ If unsure, say Y.
++
+ endif # LEDS_TRIGGERS
diff --git a/target/linux/generic/pending-4.14/834-ledtrig-libata.patch b/target/linux/generic/pending-4.14/834-ledtrig-libata.patch
new file mode 100644
index 0000000..8597a93
--- /dev/null
+++ b/target/linux/generic/pending-4.14/834-ledtrig-libata.patch
@@ -0,0 +1,149 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: libata: add ledtrig support
+
+This adds a LED trigger for each ATA port indicating disk activity.
+
+As this is needed only on specific platforms (NAS SoCs and such),
+these platforms should select ARCH_WANT_LIBATA_LEDS if they have
+boards with LED(s) intended to indicate ATA disk activity and
+need the OS to take care of that.
+That way, if the option is not selected, LED trigger support will not
+be included in libata-core, and both the code paths and the structures
+remain untouched.
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/ata/Kconfig | 16 ++++++++++++++++
+ drivers/ata/libata-core.c | 41 +++++++++++++++++++++++++++++++++++++++++
+ include/linux/libata.h | 9 +++++++++
+ 3 files changed, 66 insertions(+)
+
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -46,6 +46,22 @@ config ATA_VERBOSE_ERROR
+
+ If unsure, say Y.
+
++config ARCH_WANT_LIBATA_LEDS
++ bool
++
++config ATA_LEDS
++ bool "support ATA port LED triggers"
++ depends on ARCH_WANT_LIBATA_LEDS
++ select NEW_LEDS
++ select LEDS_CLASS
++ select LEDS_TRIGGERS
++ default y
++ help
++ This option adds a LED trigger for each registered ATA port.
++ It is used to drive disk activity LEDs connected via GPIO.
++
++ If unsure, say N.
++
+ config ATA_ACPI
+ bool "ATA ACPI Support"
+ depends on ACPI
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -731,6 +731,19 @@ u64 ata_tf_read_block(const struct ata_t
+ return block;
+ }
+
++#ifdef CONFIG_ATA_LEDS
++#define LIBATA_BLINK_DELAY 20 /* ms */
++static inline void ata_led_act(struct ata_port *ap)
++{
++ unsigned long led_delay = LIBATA_BLINK_DELAY;
++
++ if (unlikely(!ap->ledtrig))
++ return;
++
++ led_trigger_blink_oneshot(ap->ledtrig, &led_delay, &led_delay, 0);
++}
++#endif
++
+ /**
+ * ata_build_rw_tf - Build ATA taskfile for given read/write request
+ * @tf: Target ATA taskfile
+@@ -5089,6 +5102,9 @@ struct ata_queued_cmd *ata_qc_new_init(s
+ if (tag < 0)
+ return NULL;
+ }
++#ifdef CONFIG_ATA_LEDS
++ ata_led_act(ap);
++#endif
+
+ qc = __ata_qc_from_tag(ap, tag);
+ qc->tag = tag;
+@@ -5991,6 +6007,9 @@ struct ata_port *ata_port_alloc(struct a
+ ap->stats.unhandled_irq = 1;
+ ap->stats.idle_irq = 1;
+ #endif
++#ifdef CONFIG_ATA_LEDS
++ ap->ledtrig = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
++#endif
+ ata_sff_port_init(ap);
+
+ return ap;
+@@ -6012,6 +6031,12 @@ static void ata_host_release(struct devi
+
+ kfree(ap->pmp_link);
+ kfree(ap->slave_link);
++#ifdef CONFIG_ATA_LEDS
++ if (ap->ledtrig) {
++ led_trigger_unregister(ap->ledtrig);
++ kfree(ap->ledtrig);
++ }
++#endif
+ kfree(ap);
+ host->ports[i] = NULL;
+ }
+@@ -6458,7 +6483,23 @@ int ata_host_register(struct ata_host *h
+ host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
+ host->ports[i]->local_port_no = i + 1;
+ }
++#ifdef CONFIG_ATA_LEDS
++ for (i = 0; i < host->n_ports; i++) {
++ if (unlikely(!host->ports[i]->ledtrig))
++ continue;
+
++ snprintf(host->ports[i]->ledtrig_name,
++ sizeof(host->ports[i]->ledtrig_name), "ata%u",
++ host->ports[i]->print_id);
++
++ host->ports[i]->ledtrig->name = host->ports[i]->ledtrig_name;
++
++ if (led_trigger_register(host->ports[i]->ledtrig)) {
++ kfree(host->ports[i]->ledtrig);
++ host->ports[i]->ledtrig = NULL;
++ }
++ }
++#endif
+ /* Create associated sysfs transport objects */
+ for (i = 0; i < host->n_ports; i++) {
+ rc = ata_tport_add(host->dev,host->ports[i]);
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -38,6 +38,9 @@
+ #include <linux/acpi.h>
+ #include <linux/cdrom.h>
+ #include <linux/sched.h>
++#ifdef CONFIG_ATA_LEDS
++#include <linux/leds.h>
++#endif
+
+ /*
+ * Define if arch has non-standard setup. This is a _PCI_ standard
+@@ -889,6 +892,12 @@ struct ata_port {
+ #ifdef CONFIG_ATA_ACPI
+ struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
+ #endif
++
++#ifdef CONFIG_ATA_LEDS
++ struct led_trigger *ledtrig;
++ char ledtrig_name[8];
++#endif
++
+ /* owned by EH */
+ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
+ };
diff --git a/target/linux/generic/pending-4.14/920-mangle_bootargs.patch b/target/linux/generic/pending-4.14/920-mangle_bootargs.patch
new file mode 100644
index 0000000..be89441
--- /dev/null
+++ b/target/linux/generic/pending-4.14/920-mangle_bootargs.patch
@@ -0,0 +1,71 @@
+From: Imre Kaloz <kaloz@openwrt.org>
+Subject: init: add CONFIG_MANGLE_BOOTARGS and disable it by default
+
+Enabling this option renames the bootloader-supplied root= and
+rootfstype= variables: their values might still need to be known, but
+left untouched they would break the automatic setup OpenWrt uses.
+
+Signed-off-by: Imre Kaloz <kaloz@openwrt.org>
+---
+ init/Kconfig | 9 +++++++++
+ init/main.c | 24 ++++++++++++++++++++++++
+ 2 files changed, 33 insertions(+)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1420,6 +1420,15 @@ config EMBEDDED
+ an embedded system so certain expert options are available
+ for configuration.
+
++config MANGLE_BOOTARGS
++ bool "Rename offending bootargs"
++ depends on EXPERT
++ help
++ Sometimes the bootloader passes bogus root= and rootfstype=
++ parameters to the kernel, and while you want to ignore them,
++ you still need to know the values, e.g. to support dual
++ firmware layouts on the flash.
++
+ config HAVE_PERF_EVENTS
+ bool
+ help
+--- a/init/main.c
++++ b/init/main.c
+@@ -359,6 +359,29 @@ static inline void setup_nr_cpu_ids(void
+ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
+ #endif
+
++#ifdef CONFIG_MANGLE_BOOTARGS
++static void __init mangle_bootargs(char *command_line)
++{
++ char *rootdev;
++ char *rootfs;
++
++ rootdev = strstr(command_line, "root=/dev/mtdblock");
++
++ if (rootdev)
++ strncpy(rootdev, "mangled_rootblock=", 18);
++
++ rootfs = strstr(command_line, "rootfstype");
++
++ if (rootfs)
++ strncpy(rootfs, "mangled_fs", 10);
++
++}
++#else
++static void __init mangle_bootargs(char *command_line)
++{
++}
++#endif
++
+ /*
+ * We need to store the untouched command line for future reference.
+ * We also need to store the touched command line since the parameter
+@@ -536,6 +559,7 @@ asmlinkage __visible void __init start_k
+ add_device_randomness(command_line, strlen(command_line));
+ boot_init_stack_canary();
+ mm_init_cpumask(&init_mm);
++ mangle_bootargs(command_line);
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();