author     Jonas Gorski <jogo@openwrt.org>    2012-02-02 08:23:54 +0000
committer  Jonas Gorski <jogo@openwrt.org>    2012-02-02 08:23:54 +0000
commit     eca9950f63ec4491a7bd6138174ac08595184e1b (patch)
tree       a21d36b4e93a079ce234258a3a82b97dd82c10bf /target/linux/generic/patches-3.3
parent     c336de3d85efb1a1150825870b7cbff4f7a65366 (diff)
kernel: add preliminary support for linux 3.3
Based on 3.3-rc2

SVN-Revision: 29986
Diffstat (limited to 'target/linux/generic/patches-3.3')
-rw-r--r--  target/linux/generic/patches-3.3/006-arm_kernel_xz_support.patch  96
-rw-r--r--  target/linux/generic/patches-3.3/100-overlayfs_v12.patch  3232
-rw-r--r--  target/linux/generic/patches-3.3/102-ehci_hcd_ignore_oc.patch  41
-rw-r--r--  target/linux/generic/patches-3.3/110-fix_mtd_include.patch  10
-rw-r--r--  target/linux/generic/patches-3.3/200-fix_localversion.patch  11
-rw-r--r--  target/linux/generic/patches-3.3/201-extra_optimization.patch  24
-rw-r--r--  target/linux/generic/patches-3.3/210-darwin_scripts_include.patch  78
-rw-r--r--  target/linux/generic/patches-3.3/211-stddef_include.patch  17
-rw-r--r--  target/linux/generic/patches-3.3/220-module_exports.patch  89
-rw-r--r--  target/linux/generic/patches-3.3/230-openwrt_lzma_options.patch  54
-rw-r--r--  target/linux/generic/patches-3.3/250-netfilter_depends.patch  18
-rw-r--r--  target/linux/generic/patches-3.3/251-sound_kconfig.patch  11
-rw-r--r--  target/linux/generic/patches-3.3/252-mv_cesa_depends.patch  10
-rw-r--r--  target/linux/generic/patches-3.3/253-ssb_b43_default_on.patch  29
-rw-r--r--  target/linux/generic/patches-3.3/254-textsearch_kconfig_hacks.patch  23
-rw-r--r--  target/linux/generic/patches-3.3/255-lib80211_kconfig_hacks.patch  19
-rw-r--r--  target/linux/generic/patches-3.3/256-crypto_add_kconfig_prompts.patch  47
-rw-r--r--  target/linux/generic/patches-3.3/257-wireless_ext_kconfig_hack.patch  22
-rw-r--r--  target/linux/generic/patches-3.3/300-mips_expose_boot_raw.patch  39
-rw-r--r--  target/linux/generic/patches-3.3/301-mips_image_cmdline_hack.patch  28
-rw-r--r--  target/linux/generic/patches-3.3/302-mips_use_generic_thread_info_allocator.patch  18
-rw-r--r--  target/linux/generic/patches-3.3/303-mips_fix_kexec.patch  11
-rw-r--r--  target/linux/generic/patches-3.3/304-mips_disable_fpu.patch  160
-rw-r--r--  target/linux/generic/patches-3.3/305-mips_module_reloc.patch  371
-rw-r--r--  target/linux/generic/patches-3.3/306-mips_mem_functions_performance.patch  83
-rw-r--r--  target/linux/generic/patches-3.3/307-mips_oprofile_fix.patch  35
-rw-r--r--  target/linux/generic/patches-3.3/310-arm_module_unresolved_weak_sym.patch  13
-rw-r--r--  target/linux/generic/patches-3.3/320-ppc4xx_optimization.patch  31
-rw-r--r--  target/linux/generic/patches-3.3/321-powerpc_crtsavres_prereq.patch  56
-rw-r--r--  target/linux/generic/patches-3.3/330-mips-add-crash-and-kdump-support.patch  616
-rw-r--r--  target/linux/generic/patches-3.3/331-mips-kexec-enhanche-the-support.patch  159
-rw-r--r--  target/linux/generic/patches-3.3/332-mips-kexec-init-the-arguments-for-the-new-kernel-image.patch  52
-rw-r--r--  target/linux/generic/patches-3.3/333-mips-kexec-get-kernel-parameters-from-kexec-tools.patch  88
-rw-r--r--  target/linux/generic/patches-3.3/334-mips-fix-compiling-failure-of-relocate_kernel.patch  83
-rw-r--r--  target/linux/generic/patches-3.3/335-mips-kexec-cleanup-kexec-tools-parameter-handling.patch  186
-rw-r--r--  target/linux/generic/patches-3.3/400-rootfs_split.patch  327
-rw-r--r--  target/linux/generic/patches-3.3/401-partial_eraseblock_write.patch  145
-rw-r--r--  target/linux/generic/patches-3.3/410-mtd_info_move_forward_decl.patch  18
-rw-r--r--  target/linux/generic/patches-3.3/420-redboot_space.patch  30
-rw-r--r--  target/linux/generic/patches-3.3/421-redboot_boardconfig.patch  60
-rw-r--r--  target/linux/generic/patches-3.3/430-mtd_myloader_partition_parser.patch  35
-rw-r--r--  target/linux/generic/patches-3.3/440-block2mtd_init.patch  116
-rw-r--r--  target/linux/generic/patches-3.3/441-block2mtd_refresh.patch  291
-rw-r--r--  target/linux/generic/patches-3.3/442-block2mtd_probe.patch  10
-rw-r--r--  target/linux/generic/patches-3.3/450-mtd_plat_nand_chip_fixup.patch  37
-rw-r--r--  target/linux/generic/patches-3.3/451-mtd_fix_nand_correct_data_return_code.patch  12
-rw-r--r--  target/linux/generic/patches-3.3/460-cfi_cmdset_0002_no_erase_suspend.patch  11
-rw-r--r--  target/linux/generic/patches-3.3/470-mtd_m25p80_add_pm25lv_flash_support.patch  39
-rw-r--r--  target/linux/generic/patches-3.3/473-mtd_m25p80_add_w25q128.patch  10
-rw-r--r--  target/linux/generic/patches-3.3/475-mtd_cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch  18
-rw-r--r--  target/linux/generic/patches-3.3/476-mtd-m25p80-allow-to-disable-small-sector-erase.patch  41
-rw-r--r--  target/linux/generic/patches-3.3/500-yaffs_support.patch  18
-rw-r--r--  target/linux/generic/patches-3.3/501-yaffs_cvs_2009_04_24.patch  12344
-rw-r--r--  target/linux/generic/patches-3.3/502-yaffs_git_2010_10_20.patch  27068
-rw-r--r--  target/linux/generic/patches-3.3/503-yaffs_symlink_bug.patch  17
-rw-r--r--  target/linux/generic/patches-3.3/504-yaffs_mutex_fix.patch  20
-rw-r--r--  target/linux/generic/patches-3.3/505-2.6.39_fix.patch  147
-rw-r--r--  target/linux/generic/patches-3.3/510-jffs2_make_lzma_available.patch  5142
-rw-r--r--  target/linux/generic/patches-3.3/511-debloat_lzma.patch  485
-rw-r--r--  target/linux/generic/patches-3.3/512-jffs2_eofdetect.patch  132
-rw-r--r--  target/linux/generic/patches-3.3/520-squashfs_update_xz_comp_opts.patch  25
-rw-r--r--  target/linux/generic/patches-3.3/600-netfilter_layer7_2.22.patch  2132
-rw-r--r--  target/linux/generic/patches-3.3/601-netfilter_layer7_pktmatch.patch  108
-rw-r--r--  target/linux/generic/patches-3.3/602-netfilter_layer7_match.patch  51
-rw-r--r--  target/linux/generic/patches-3.3/603-netfilter_layer7_2.6.36_fix.patch  61
-rw-r--r--  target/linux/generic/patches-3.3/604-netfilter_cisco_794x_iphone.patch  118
-rw-r--r--  target/linux/generic/patches-3.3/610-netfilter_match_bypass_default_checks.patch  93
-rw-r--r--  target/linux/generic/patches-3.3/611-netfilter_match_bypass_default_table.patch  81
-rw-r--r--  target/linux/generic/patches-3.3/612-netfilter_match_reduce_memory_access.patch  16
-rw-r--r--  target/linux/generic/patches-3.3/613-netfilter_optional_tcp_window_check.patch  36
-rw-r--r--  target/linux/generic/patches-3.3/620-sched_esfq.patch  791
-rw-r--r--  target/linux/generic/patches-3.3/621-sched_act_connmark.patch  172
-rw-r--r--  target/linux/generic/patches-3.3/630-packet_socket_type.patch  132
-rw-r--r--  target/linux/generic/patches-3.3/640-bridge_no_eap_forward.patch  15
-rw-r--r--  target/linux/generic/patches-3.3/641-bridge_always_accept_eap.patch  11
-rw-r--r--  target/linux/generic/patches-3.3/642-bridge_port_isolate.patch  103
-rw-r--r--  target/linux/generic/patches-3.3/643-bridge_remove_ipv6_dependency.patch  107
-rw-r--r--  target/linux/generic/patches-3.3/650-pppoe_header_pad.patch  20
-rw-r--r--  target/linux/generic/patches-3.3/651-wireless_mesh_header.patch  11
-rw-r--r--  target/linux/generic/patches-3.3/652-atm_header_changes.patch  12
-rw-r--r--  target/linux/generic/patches-3.3/700-swconfig.patch  29
-rw-r--r--  target/linux/generic/patches-3.3/701-phy_extension.patch  72
-rw-r--r--  target/linux/generic/patches-3.3/702-phy_add_aneg_done_function.patch  45
-rw-r--r--  target/linux/generic/patches-3.3/720-phy_adm6996.patch  26
-rw-r--r--  target/linux/generic/patches-3.3/721-phy_packets.patch  63
-rw-r--r--  target/linux/generic/patches-3.3/722-phy_mvswitch.patch  22
-rw-r--r--  target/linux/generic/patches-3.3/723-phy_ip175c.patch  23
-rw-r--r--  target/linux/generic/patches-3.3/724-phy_ar8216.patch  23
-rw-r--r--  target/linux/generic/patches-3.3/725-phy_rtl8306.patch  23
-rw-r--r--  target/linux/generic/patches-3.3/726-phy_rtl8366.patch  46
-rw-r--r--  target/linux/generic/patches-3.3/727-phy-rtl8367.patch  23
-rw-r--r--  target/linux/generic/patches-3.3/750-hostap_txpower.patch  154
-rw-r--r--  target/linux/generic/patches-3.3/810-pci_disable_common_quirks.patch  43
-rw-r--r--  target/linux/generic/patches-3.3/811-pci_disable_usb_common_quirks.patch  18
-rw-r--r--  target/linux/generic/patches-3.3/820-usb_add_usb_find_device_by_name.patch  84
-rw-r--r--  target/linux/generic/patches-3.3/830-ledtrig_morse.patch  28
-rw-r--r--  target/linux/generic/patches-3.3/831-ledtrig_netdev.patch  51
-rw-r--r--  target/linux/generic/patches-3.3/832-ledtrig_usbdev.patch  31
-rw-r--r--  target/linux/generic/patches-3.3/833-gpio_buttons.patch  30
-rw-r--r--  target/linux/generic/patches-3.3/835-gpiodev.patch  27
-rw-r--r--  target/linux/generic/patches-3.3/840-rtc7301.patch  250
-rw-r--r--  target/linux/generic/patches-3.3/850-glamo_headers.patch  21
-rw-r--r--  target/linux/generic/patches-3.3/861-04_spi_gpio_implement_spi_delay.patch  58
-rw-r--r--  target/linux/generic/patches-3.3/862-gpio_spi_driver.patch  373
-rw-r--r--  target/linux/generic/patches-3.3/863-gpiommc.patch  844
-rw-r--r--  target/linux/generic/patches-3.3/864-gpiommc_configfs_locking.patch  58
-rw-r--r--  target/linux/generic/patches-3.3/870-hifn795x_byteswap.patch  17
-rw-r--r--  target/linux/generic/patches-3.3/900-slab_maxsize.patch  13
-rw-r--r--  target/linux/generic/patches-3.3/910-kobject_uevent.patch  32
-rw-r--r--  target/linux/generic/patches-3.3/911-kobject_add_broadcast_uevent.patch  85
-rw-r--r--  target/linux/generic/patches-3.3/920-unable_to_open_console.patch  11
-rw-r--r--  target/linux/generic/patches-3.3/921-use_preinit_as_init.patch  14
-rw-r--r--  target/linux/generic/patches-3.3/930-crashlog.patch  242
-rw-r--r--  target/linux/generic/patches-3.3/940-ocf_kbuild_integration.patch  20
-rw-r--r--  target/linux/generic/patches-3.3/941-ocf_20110720.patch  133
-rw-r--r--  target/linux/generic/patches-3.3/950-vm_exports.patch  117
-rw-r--r--  target/linux/generic/patches-3.3/960-decompress_unlzo_fix.patch  23
-rw-r--r--  target/linux/generic/patches-3.3/980-update_arm_machtypes.patch  2868
-rw-r--r--  target/linux/generic/patches-3.3/992-mpcore_wdt_fix_watchdog_counter_loading.patch  64
-rw-r--r--  target/linux/generic/patches-3.3/993-mpcore_wdt_fix_wdioc_setoptions_handling.patch  29
-rw-r--r--  target/linux/generic/patches-3.3/994-mpcore_wdt_fix_timer_mode_setup.patch  57
121 files changed, 62718 insertions, 0 deletions
diff --git a/target/linux/generic/patches-3.3/006-arm_kernel_xz_support.patch b/target/linux/generic/patches-3.3/006-arm_kernel_xz_support.patch
new file mode 100644
index 0000000..bfb1757
--- /dev/null
+++ b/target/linux/generic/patches-3.3/006-arm_kernel_xz_support.patch
@@ -0,0 +1,96 @@
+From 2d303b4683145f7dbc918bd14d04e1396581b2ce Mon Sep 17 00:00:00 2001
+From: Imre Kaloz <kaloz@openwrt.org>
+Date: Thu, 7 Jul 2011 12:05:21 +0200
+Subject: [PATCH] ARM: support XZ compressed kernels
+
+Wire up support for the XZ decompressor
+
+Signed-off-by: Imre Kaloz <kaloz@openwrt.org>
+---
+ arch/arm/Kconfig | 1 +
+ arch/arm/boot/compressed/Makefile | 11 +++++++++--
+ arch/arm/boot/compressed/decompress.c | 4 ++++
+ arch/arm/boot/compressed/piggy.xzkern.S | 6 ++++++
+ lib/xz/xz_dec_stream.c | 1 +
+ 5 files changed, 21 insertions(+), 2 deletions(-)
+ create mode 100644 arch/arm/boot/compressed/piggy.xzkern.S
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -21,6 +21,7 @@ config ARM
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_LZMA
++ select HAVE_KERNEL_XZ
+ select HAVE_IRQ_WORK
+ select HAVE_PERF_EVENTS
+ select PERF_USE_VMALLOC
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -92,6 +92,7 @@ SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/
+ suffix_$(CONFIG_KERNEL_GZIP) = gzip
+ suffix_$(CONFIG_KERNEL_LZO) = lzo
+ suffix_$(CONFIG_KERNEL_LZMA) = lzma
++suffix_$(CONFIG_KERNEL_XZ) = xzkern
+
+ # Borrowed libfdt files for the ATAG compatibility mode
+
+@@ -115,7 +116,7 @@ targets := vmlinux vmlinux.lds \
+ lib1funcs.o lib1funcs.S font.o font.c head.o misc.o $(OBJS)
+
+ # Make sure files are removed during clean
+-extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S $(libfdt) $(libfdt_hdrs)
++extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
+
+ ifeq ($(CONFIG_FUNCTION_TRACER),y)
+ ORIG_CFLAGS := $(KBUILD_CFLAGS)
+@@ -171,8 +172,14 @@ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CO
+ false; \
+ fi
+
++# For __aeabi_llsl
++ashldi3 = $(obj)/ashldi3.o
++
++$(obj)/ashldi3.S: $(srctree)/arch/$(SRCARCH)/lib/ashldi3.S FORCE
++ $(call cmd,shipped)
++
+ $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
+- $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE
++ $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) FORCE
+ @$(check_for_multiple_zreladdr)
+ $(call if_changed,ld)
+ @$(check_for_bad_syms)
+--- a/arch/arm/boot/compressed/decompress.c
++++ b/arch/arm/boot/compressed/decompress.c
+@@ -44,6 +44,12 @@ extern void error(char *);
+ #include "../../../../lib/decompress_unlzma.c"
+ #endif
+
++#ifdef CONFIG_KERNEL_XZ
++#define memmove memmove
++#define memcpy memcpy
++#include "../../../../lib/decompress_unxz.c"
++#endif
++
+ int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
+ {
+ return decompress(input, len, NULL, NULL, output, NULL, error);
+--- /dev/null
++++ b/arch/arm/boot/compressed/piggy.xzkern.S
+@@ -0,0 +1,6 @@
++ .section .piggydata,#alloc
++ .globl input_data
++input_data:
++ .incbin "arch/arm/boot/compressed/piggy.xzkern"
++ .globl input_data_end
++input_data_end:
+--- a/lib/xz/xz_dec_stream.c
++++ b/lib/xz/xz_dec_stream.c
+@@ -9,6 +9,7 @@
+
+ #include "xz_private.h"
+ #include "xz_stream.h"
++#include <linux/kernel.h>
+
+ /* Hash used to validate the Index field */
+ struct xz_dec_hash {
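The Makefile hunk above copies arch/arm/lib/ashldi3.S into the decompressor build "For __aeabi_llsl". The XZ decompressor performs 64-bit shifts, and on 32-bit ARM the compiler may lower such a shift to a call into the EABI helper __aeabi_llsl (provided by ashldi3) instead of open-coding it; since the compressed-kernel environment is freestanding and not linked against libgcc, the helper has to be shipped alongside. A minimal sketch, not taken from the patch, of the kind of C code that produces such a call:

/* Sketch only: a variable 64-bit shift like the ones used by the XZ
 * decompressor.  Built for 32-bit ARM, GCC may emit "bl __aeabi_llsl"
 * here rather than inlining the shift. */
#include <stdint.h>

uint64_t shift_left_64(uint64_t value, unsigned int bits)
{
	return value << bits;
}
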
diff --git a/target/linux/generic/patches-3.3/100-overlayfs_v12.patch b/target/linux/generic/patches-3.3/100-overlayfs_v12.patch
new file mode 100644
index 0000000..b7508e6
--- /dev/null
+++ b/target/linux/generic/patches-3.3/100-overlayfs_v12.patch
@@ -0,0 +1,3232 @@
+--- a/Documentation/filesystems/Locking
++++ b/Documentation/filesystems/Locking
+@@ -62,6 +62,7 @@ ata *);
+ int (*removexattr) (struct dentry *, const char *);
+ void (*truncate_range)(struct inode *, loff_t, loff_t);
+ int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
++ struct file *(*open)(struct dentry *,struct file *,const struct cred *);
+
+ locking rules:
+ all may block
+@@ -89,6 +90,7 @@ listxattr: no
+ removexattr: yes
+ truncate_range: yes
+ fiemap: no
++open: no
+ Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
+ victim.
+ cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
+--- /dev/null
++++ b/Documentation/filesystems/overlayfs.txt
+@@ -0,0 +1,199 @@
++Written by: Neil Brown <neilb@suse.de>
++
++Overlay Filesystem
++==================
++
++This document describes a prototype for a new approach to providing
++overlay-filesystem functionality in Linux (sometimes referred to as
++union-filesystems). An overlay-filesystem tries to present a
++filesystem which is the result of overlaying one filesystem on top
++of the other.
++
++The result will inevitably fail to look exactly like a normal
++filesystem for various technical reasons. The expectation is that
++many use cases will be able to ignore these differences.
++
++This approach is 'hybrid' because the objects that appear in the
++filesystem do not all appear to belong to that filesystem. In many
++cases an object accessed in the union will be indistinguishable
++from accessing the corresponding object from the original filesystem.
++This is most obvious from the 'st_dev' field returned by stat(2).
++
++While directories will report an st_dev from the overlay-filesystem,
++all non-directory objects will report an st_dev from the lower or
++upper filesystem that is providing the object. Similarly st_ino will
++only be unique when combined with st_dev, and both of these can change
++over the lifetime of a non-directory object. Many applications and
++tools ignore these values and will not be affected.
++
++Upper and Lower
++---------------
++
++An overlay filesystem combines two filesystems - an 'upper' filesystem
++and a 'lower' filesystem. When a name exists in both filesystems, the
++object in the 'upper' filesystem is visible while the object in the
++'lower' filesystem is either hidden or, in the case of directories,
++merged with the 'upper' object.
++
++It would be more correct to refer to an upper and lower 'directory
++tree' rather than 'filesystem' as it is quite possible for both
++directory trees to be in the same filesystem and there is no
++requirement that the root of a filesystem be given for either upper or
++lower.
++
++The lower filesystem can be any filesystem supported by Linux and does
++not need to be writable. The lower filesystem can even be another
++overlayfs. The upper filesystem will normally be writable and if it
++is it must support the creation of trusted.* extended attributes, and
++must provide valid d_type in readdir responses, at least for symbolic
++links - so NFS is not suitable.
++
++A read-only overlay of two read-only filesystems may use any
++filesystem type.
++
++Directories
++-----------
++
++Overlaying mainly involves directories. If a given name appears in both
++upper and lower filesystems and refers to a non-directory in either,
++then the lower object is hidden - the name refers only to the upper
++object.
++
++Where both upper and lower objects are directories, a merged directory
++is formed.
++
++At mount time, the two directories given as mount options are combined
++into a merged directory:
++
++ mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper /overlay
++
++Then whenever a lookup is requested in such a merged directory, the
++lookup is performed in each actual directory and the combined result
++is cached in the dentry belonging to the overlay filesystem. If both
++actual lookups find directories, both are stored and a merged
++directory is created, otherwise only one is stored: the upper if it
++exists, else the lower.
++
++Only the lists of names from directories are merged. Other content
++such as metadata and extended attributes are reported for the upper
++directory only. These attributes of the lower directory are hidden.
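
As a concrete illustration of the mount invocation shown above, the same overlay could be set up from C through mount(2). This is only a sketch and not part of the patch; /lower, /upper and /overlay are the example directories used throughout this document.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Mirrors: mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper /overlay */
	if (mount("overlayfs", "/overlay", "overlayfs", 0,
		  "lowerdir=/lower,upperdir=/upper") != 0) {
		perror("mount overlayfs");
		return 1;
	}
	return 0;
}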
++
++whiteouts and opaque directories
++--------------------------------
++
++In order to support rm and rmdir without changing the lower
++filesystem, an overlay filesystem needs to record in the upper filesystem
++that files have been removed. This is done using whiteouts and opaque
++directories (non-directories are always opaque).
++
++The overlay filesystem uses extended attributes with a
++"trusted.overlay." prefix to record these details.
++
++A whiteout is created as a symbolic link with target
++"(overlay-whiteout)" and with xattr "trusted.overlay.whiteout" set to "y".
++When a whiteout is found in the upper level of a merged directory, any
++matching name in the lower level is ignored, and the whiteout itself
++is also hidden.
++
++A directory is made opaque by setting the xattr "trusted.overlay.opaque"
++to "y". Where the upper filesystem contains an opaque directory, any
++directory in the lower filesystem with the same name is ignored.
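
A userspace sketch (not part of the patch) of how such a whiteout can be recognised, following the description above: the entry is a symbolic link to "(overlay-whiteout)" carrying the trusted.overlay.whiteout xattr with value "y". Reading xattrs in the trusted.* namespace requires CAP_SYS_ADMIN, and the path used below is hypothetical.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>
#include <unistd.h>

/* Returns 1 if 'path' looks like an overlayfs whiteout as described above. */
static int is_whiteout(const char *path)
{
	char target[64];
	char value[2];
	ssize_t n;

	n = readlink(path, target, sizeof(target) - 1);
	if (n < 0)
		return 0;
	target[n] = '\0';
	if (strcmp(target, "(overlay-whiteout)") != 0)
		return 0;

	/* The whiteout xattr is set to the single byte "y". */
	n = lgetxattr(path, "trusted.overlay.whiteout", value, sizeof(value));
	return n == 1 && value[0] == 'y';
}

int main(void)
{
	/* Hypothetical entry in the upper directory. */
	printf("%d\n", is_whiteout("/upper/removed-file"));
	return 0;
}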
++
++readdir
++-------
++
++When a 'readdir' request is made on a merged directory, the upper and
++lower directories are each read and the name lists merged in the
++obvious way (upper is read first, then lower - entries that already
++exist are not re-added). This merged name list is cached in the
++'struct file' and so remains as long as the file is kept open. If the
++directory is opened and read by two processes at the same time, they
++will each have separate caches. A seekdir to the start of the
++directory (offset 0) followed by a readdir will cause the cache to be
++discarded and rebuilt.
++
++This means that changes to the merged directory do not appear while a
++directory is being read. This is unlikely to be noticed by many
++programs.
++
++seek offsets are assigned sequentially when the directories are read.
++Thus if
++ - read part of a directory
++ - remember an offset, and close the directory
++ - re-open the directory some time later
++ - seek to the remembered offset
++
++there may be little correlation between the old and new locations in
++the list of filenames, particularly if anything has changed in the
++directory.
++
++Readdir on directories that are not merged is simply handled by the
++underlying directory (upper or lower).
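
The cache-rebuild behaviour described above can be observed with ordinary directory reads. The sketch below is illustrative only; /overlay is the example mount point from this document, and the program simply lists the merged directory twice, seeking back to offset 0 in between as the text describes.

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *dp = opendir("/overlay");
	struct dirent *de;

	if (!dp)
		return 1;

	while ((de = readdir(dp)) != NULL)
		printf("%s\n", de->d_name);

	/* Per the text above, seeking back to offset 0 discards the merged
	 * name list cached in the struct file and rebuilds it on the next
	 * readdir, so later changes to the directory become visible. */
	seekdir(dp, 0);
	while ((de = readdir(dp)) != NULL)
		printf("%s\n", de->d_name);

	closedir(dp);
	return 0;
}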
++
++
++Non-directories
++---------------
++
++Objects that are not directories (files, symlinks, device-special
++files etc.) are presented either from the upper or lower filesystem as
++appropriate. When a file in the lower filesystem is accessed in a way
++the requires write-access, such as opening for write access, changing
++some metadata etc., the file is first copied from the lower filesystem
++to the upper filesystem (copy_up). Note that creating a hard-link
++also requires copy_up, though of course creation of a symlink does
++not.
++
++The copy_up may turn out to be unnecessary, for example if the file is
++opened for read-write but the data is not modified.
++
++The copy_up process first makes sure that the containing directory
++exists in the upper filesystem - creating it and any parents as
++necessary. It then creates the object with the same metadata (owner,
++mode, mtime, symlink-target etc.) and then if the object is a file, the
++data is copied from the lower to the upper filesystem. Finally any
++extended attributes are copied up.
++
++Once the copy_up is complete, the overlay filesystem simply
++provides direct access to the newly created file in the upper
++filesystem - future operations on the file are barely noticed by the
++overlay filesystem (though an operation on the name of the file such as
++rename or unlink will of course be noticed and handled).
++
++
++Non-standard behavior
++---------------------
++
++The copy_up operation essentially creates a new, identical file and
++moves it over to the old name. The new file may be on a different
++filesystem, so both st_dev and st_ino of the file may change.
++
++Any open files referring to this inode will access the old data and
++metadata. Similarly any file locks obtained before copy_up will not
++apply to the copied up file.
++
++If a file is opened with O_RDONLY, then fchmod(2), fchown(2), futimesat(2)
++and fsetxattr(2) will fail with EROFS.
++
++If a file with multiple hard links is copied up, then this will
++"break" the link. Changes will not be propagated to other names
++referring to the same inode.
++
++Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory
++object in overlayfs will not contain valid absolute paths, only
++relative paths leading up to the filesystem's root. This will be
++fixed in the future.
++
++Some operations are not atomic, for example a crash during copy_up or
++rename will leave the filesystem in an inconsistent state. This will
++be addressed in the future.
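
The first point above, that st_dev and st_ino may change once copy_up happens, is easy to demonstrate. The sketch below is not from the patch; it assumes the example /overlay mount from this document and a hypothetical file that initially exists only in the lower layer.

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/overlay/file-from-lower";	/* hypothetical */
	struct stat before, after;
	int fd;

	if (stat(path, &before) != 0)
		return 1;

	fd = open(path, O_WRONLY);	/* write access triggers copy_up */
	if (fd < 0)
		return 1;
	close(fd);

	if (stat(path, &after) != 0)
		return 1;

	printf("st_dev %llu -> %llu, st_ino %llu -> %llu\n",
	       (unsigned long long)before.st_dev, (unsigned long long)after.st_dev,
	       (unsigned long long)before.st_ino, (unsigned long long)after.st_ino);
	return 0;
}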
++
++Changes to underlying filesystems
++---------------------------------
++
++Offline changes, when the overlay is not mounted, are allowed to either
++the upper or the lower trees.
++
++Changes to the underlying filesystems while part of a mounted overlay
++filesystem are not allowed. If the underlying filesystem is changed,
++the behavior of the overlay is undefined, though it will not result in
++a crash or deadlock.
+--- a/Documentation/filesystems/vfs.txt
++++ b/Documentation/filesystems/vfs.txt
+@@ -364,6 +364,8 @@ struct inode_operations {
+ ssize_t (*listxattr) (struct dentry *, char *, size_t);
+ int (*removexattr) (struct dentry *, const char *);
+ void (*truncate_range)(struct inode *, loff_t, loff_t);
++ struct file *(*open) (struct dentry *, struct file *,
++ const struct cred *);
+ };
+
+ Again, all methods are called without any locks being held, unless
+@@ -475,6 +477,12 @@ otherwise noted.
+ truncate_range: a method provided by the underlying filesystem to truncate a
+ range of blocks , i.e. punch a hole somewhere in a file.
+
++ open: this is an alternative to f_op->open(), the difference is that this
++ method may return any open file, not necessarily originating from the
++ same filesystem as the one i_op->open() was called on. It may be useful
++ for stacking filesystems which want to allow native I/O directly on
++ underlying files.
++
+
+ The Address Space Object
+ ========================
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -4930,6 +4930,13 @@ F: drivers/scsi/osd/
+ F: include/scsi/osd_*
+ F: fs/exofs/
+
++OVERLAYFS FILESYSTEM
++M: Miklos Szeredi <miklos@szeredi.hu>
++L: linux-fsdevel@vger.kernel.org
++S: Supported
++F: fs/overlayfs/*
++F: Documentation/filesystems/overlayfs.txt
++
+ P54 WIRELESS DRIVER
+ M: Christian Lamparter <chunkeey@googlemail.com>
+ L: linux-wireless@vger.kernel.org
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -63,6 +63,7 @@ source "fs/quota/Kconfig"
+
+ source "fs/autofs4/Kconfig"
+ source "fs/fuse/Kconfig"
++source "fs/overlayfs/Kconfig"
+
+ config CUSE
+ tristate "Character device in Userspace support"
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -105,6 +105,7 @@ obj-$(CONFIG_QNX4FS_FS) += qnx4/
+ obj-$(CONFIG_AUTOFS4_FS) += autofs4/
+ obj-$(CONFIG_ADFS_FS) += adfs/
+ obj-$(CONFIG_FUSE_FS) += fuse/
++obj-$(CONFIG_OVERLAYFS_FS) += overlayfs/
+ obj-$(CONFIG_UDF_FS) += udf/
+ obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
+ obj-$(CONFIG_OMFS_FS) += omfs/
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -544,6 +544,13 @@ static struct dentry *ecryptfs_mount(str
+ s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
+ s->s_blocksize = path.dentry->d_sb->s_blocksize;
+ s->s_magic = ECRYPTFS_SUPER_MAGIC;
++ s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
++
++ rc = -EINVAL;
++ if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
++ printk(KERN_ERR "eCryptfs: maximum fs stacking depth exceeded\n");
++ goto out_free;
++ }
+
+ inode = ecryptfs_get_inode(path.dentry->d_inode, s);
+ rc = PTR_ERR(inode);
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1325,6 +1325,24 @@ void drop_collected_mounts(struct vfsmou
+ release_mounts(&umount_list);
+ }
+
++struct vfsmount *clone_private_mount(struct path *path)
++{
++ struct mount *old_mnt = real_mount(path->mnt);
++ struct mount *new_mnt;
++
++ if (IS_MNT_UNBINDABLE(old_mnt))
++ return ERR_PTR(-EINVAL);
++
++ down_read(&namespace_sem);
++ new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
++ up_read(&namespace_sem);
++ if (!new_mnt)
++ return ERR_PTR(-ENOMEM);
++
++ return &new_mnt->mnt;
++}
++EXPORT_SYMBOL_GPL(clone_private_mount);
++
+ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
+ struct vfsmount *root)
+ {
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -644,24 +644,24 @@ static inline int __get_file_write_acces
+ return error;
+ }
+
+-static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
+- struct file *f,
+- int (*open)(struct inode *, struct file *),
+- const struct cred *cred)
++static struct file *__dentry_open(struct path *path, struct file *f,
++ int (*open)(struct inode *, struct file *),
++ const struct cred *cred)
+ {
+ static const struct file_operations empty_fops = {};
+ struct inode *inode;
+ int error;
+
++ path_get(path);
+ f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
+ FMODE_PREAD | FMODE_PWRITE;
+
+ if (unlikely(f->f_flags & O_PATH))
+ f->f_mode = FMODE_PATH;
+
+- inode = dentry->d_inode;
++ inode = path->dentry->d_inode;
+ if (f->f_mode & FMODE_WRITE) {
+- error = __get_file_write_access(inode, mnt);
++ error = __get_file_write_access(inode, path->mnt);
+ if (error)
+ goto cleanup_file;
+ if (!special_file(inode->i_mode))
+@@ -669,8 +669,7 @@ static struct file *__dentry_open(struct
+ }
+
+ f->f_mapping = inode->i_mapping;
+- f->f_path.dentry = dentry;
+- f->f_path.mnt = mnt;
++ f->f_path = *path;
+ f->f_pos = 0;
+ file_sb_list_add(f, inode->i_sb);
+
+@@ -727,7 +726,7 @@ cleanup_all:
+ * here, so just reset the state.
+ */
+ file_reset_write(f);
+- mnt_drop_write(mnt);
++ mnt_drop_write(path->mnt);
+ }
+ }
+ file_sb_list_del(f);
+@@ -735,8 +734,7 @@ cleanup_all:
+ f->f_path.mnt = NULL;
+ cleanup_file:
+ put_filp(f);
+- dput(dentry);
+- mntput(mnt);
++ path_put(path);
+ return ERR_PTR(error);
+ }
+
+@@ -762,14 +760,14 @@ cleanup_file:
+ struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
+ int (*open)(struct inode *, struct file *))
+ {
++ struct path path = { .dentry = dentry, .mnt = nd->path.mnt };
+ const struct cred *cred = current_cred();
+
+ if (IS_ERR(nd->intent.open.file))
+ goto out;
+ if (IS_ERR(dentry))
+ goto out_err;
+- nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt),
+- nd->intent.open.file,
++ nd->intent.open.file = __dentry_open(&path, nd->intent.open.file,
+ open, cred);
+ out:
+ return nd->intent.open.file;
+@@ -797,11 +795,9 @@ struct file *nameidata_to_filp(struct na
+ nd->intent.open.file = NULL;
+
+ /* Has the filesystem initialised the file for us? */
+- if (filp->f_path.dentry == NULL) {
+- path_get(&nd->path);
+- filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp,
+- NULL, cred);
+- }
++ if (filp->f_path.dentry == NULL)
++ filp = vfs_open(&nd->path, filp, cred);
++
+ return filp;
+ }
+
+@@ -812,27 +808,48 @@ struct file *nameidata_to_filp(struct na
+ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
+ const struct cred *cred)
+ {
+- int error;
+ struct file *f;
++ struct file *ret;
++ struct path path = { .dentry = dentry, .mnt = mnt };
+
+ validate_creds(cred);
+
+ /* We must always pass in a valid mount pointer. */
+ BUG_ON(!mnt);
+
+- error = -ENFILE;
++ ret = ERR_PTR(-ENFILE);
+ f = get_empty_filp();
+- if (f == NULL) {
+- dput(dentry);
+- mntput(mnt);
+- return ERR_PTR(error);
++ if (f != NULL) {
++ f->f_flags = flags;
++ ret = vfs_open(&path, f, cred);
+ }
++ path_put(&path);
+
+- f->f_flags = flags;
+- return __dentry_open(dentry, mnt, f, NULL, cred);
++ return ret;
+ }
+ EXPORT_SYMBOL(dentry_open);
+
++/**
++ * vfs_open - open the file at the given path
++ * @path: path to open
++ * @filp: newly allocated file with f_flag initialized
++ * @cred: credentials to use
++ *
++ * Open the file. If successful, the returned file will have acquired
++ * an additional reference for path.
++ */
++struct file *vfs_open(struct path *path, struct file *filp,
++ const struct cred *cred)
++{
++ struct inode *inode = path->dentry->d_inode;
++
++ if (inode->i_op->open)
++ return inode->i_op->open(path->dentry, filp, cred);
++ else
++ return __dentry_open(path, filp, NULL, cred);
++}
++EXPORT_SYMBOL(vfs_open);
++
+ static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+ {
+ struct fdtable *fdt = files_fdtable(files);
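
The fs/open.c hunk above introduces vfs_open(), which dispatches to the new i_op->open hook documented in the vfs.txt change earlier in this patch. Below is a minimal sketch of the kind of implementation the hook enables; it is not code from the patch, and it assumes a hypothetical stacking filesystem that keeps the path of the underlying file in dentry->d_fsdata (overlayfs keeps comparable state in its own ovl_entry). Reference counting and error handling are elided.

#include <linux/cred.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/path.h>

/* Hypothetical stacking filesystem: hand back a file opened on the
 * underlying object.  vfs_open() will call the real inode's ->open
 * hook if it has one, or fall back to __dentry_open(). */
static struct file *examplefs_open(struct dentry *dentry, struct file *file,
				   const struct cred *cred)
{
	struct path *realpath = dentry->d_fsdata;	/* assumed layout */

	return vfs_open(realpath, file, cred);
}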
+--- /dev/null
++++ b/fs/overlayfs/Kconfig
+@@ -0,0 +1,4 @@
++config OVERLAYFS_FS
++ tristate "Overlay filesystem support"
++ help
++ Add support for overlay filesystem.
+--- /dev/null
++++ b/fs/overlayfs/Makefile
+@@ -0,0 +1,7 @@
++#
++# Makefile for the overlay filesystem.
++#
++
++obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o
++
++overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o
+--- /dev/null
++++ b/fs/overlayfs/copy_up.c
+@@ -0,0 +1,384 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/file.h>
++#include <linux/splice.h>
++#include <linux/xattr.h>
++#include <linux/security.h>
++#include <linux/uaccess.h>
++#include "overlayfs.h"
++
++#define OVL_COPY_UP_CHUNK_SIZE (1 << 20)
++
++static int ovl_copy_up_xattr(struct dentry *old, struct dentry *new)
++{
++ ssize_t list_size, size;
++ char *buf, *name, *value;
++ int error;
++
++ if (!old->d_inode->i_op->getxattr ||
++ !new->d_inode->i_op->getxattr)
++ return 0;
++
++ list_size = vfs_listxattr(old, NULL, 0);
++ if (list_size <= 0) {
++ if (list_size == -EOPNOTSUPP)
++ return 0;
++ return list_size;
++ }
++
++ buf = kzalloc(list_size, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ error = -ENOMEM;
++ value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
++ if (!value)
++ goto out;
++
++ list_size = vfs_listxattr(old, buf, list_size);
++ if (list_size <= 0) {
++ error = list_size;
++ goto out_free_value;
++ }
++
++ for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
++ size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
++ if (size <= 0) {
++ error = size;
++ goto out_free_value;
++ }
++ error = vfs_setxattr(new, name, value, size, 0);
++ if (error)
++ goto out_free_value;
++ }
++
++out_free_value:
++ kfree(value);
++out:
++ kfree(buf);
++ return error;
++}
++
++static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
++{
++ struct file *old_file;
++ struct file *new_file;
++ int error = 0;
++
++ if (len == 0)
++ return 0;
++
++ old_file = ovl_path_open(old, O_RDONLY);
++ if (IS_ERR(old_file))
++ return PTR_ERR(old_file);
++
++ new_file = ovl_path_open(new, O_WRONLY);
++ if (IS_ERR(new_file)) {
++ error = PTR_ERR(new_file);
++ goto out_fput;
++ }
++
++ /* FIXME: copy up sparse files efficiently */
++ while (len) {
++ loff_t offset = new_file->f_pos;
++ size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
++ long bytes;
++
++ if (len < this_len)
++ this_len = len;
++
++ if (signal_pending_state(TASK_KILLABLE, current)) {
++ error = -EINTR;
++ break;
++ }
++
++ bytes = do_splice_direct(old_file, &offset, new_file, this_len,
++ SPLICE_F_MOVE);
++ if (bytes <= 0) {
++ error = bytes;
++ break;
++ }
++
++ len -= bytes;
++ }
++
++ fput(new_file);
++out_fput:
++ fput(old_file);
++ return error;
++}
++
++static char *ovl_read_symlink(struct dentry *realdentry)
++{
++ int res;
++ char *buf;
++ struct inode *inode = realdentry->d_inode;
++ mm_segment_t old_fs;
++
++ res = -EINVAL;
++ if (!inode->i_op->readlink)
++ goto err;
++
++ res = -ENOMEM;
++ buf = (char *) __get_free_page(GFP_KERNEL);
++ if (!buf)
++ goto err;
++
++ old_fs = get_fs();
++ set_fs(get_ds());
++ /* The cast to a user pointer is valid due to the set_fs() */
++ res = inode->i_op->readlink(realdentry,
++ (char __user *)buf, PAGE_SIZE - 1);
++ set_fs(old_fs);
++ if (res < 0) {
++ free_page((unsigned long) buf);
++ goto err;
++ }
++ buf[res] = '\0';
++
++ return buf;
++
++err:
++ return ERR_PTR(res);
++}
++
++static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
++{
++ struct iattr attr = {
++ .ia_valid =
++ ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
++ .ia_atime = stat->atime,
++ .ia_mtime = stat->mtime,
++ };
++
++ return notify_change(upperdentry, &attr);
++}
++
++static int ovl_set_mode(struct dentry *upperdentry, umode_t mode)
++{
++ struct iattr attr = {
++ .ia_valid = ATTR_MODE,
++ .ia_mode = mode,
++ };
++
++ return notify_change(upperdentry, &attr);
++}
++
++static int ovl_copy_up_locked(struct dentry *upperdir, struct dentry *dentry,
++ struct path *lowerpath, struct kstat *stat,
++ const char *link)
++{
++ int err;
++ struct path newpath;
++ umode_t mode = stat->mode;
++
++ /* Can't properly set mode on creation because of the umask */
++ stat->mode &= S_IFMT;
++
++ ovl_path_upper(dentry, &newpath);
++ WARN_ON(newpath.dentry);
++ newpath.dentry = ovl_upper_create(upperdir, dentry, stat, link);
++ if (IS_ERR(newpath.dentry))
++ return PTR_ERR(newpath.dentry);
++
++ if (S_ISREG(stat->mode)) {
++ err = ovl_copy_up_data(lowerpath, &newpath, stat->size);
++ if (err)
++ goto err_remove;
++ }
++
++ err = ovl_copy_up_xattr(lowerpath->dentry, newpath.dentry);
++ if (err)
++ goto err_remove;
++
++ mutex_lock(&newpath.dentry->d_inode->i_mutex);
++ if (!S_ISLNK(stat->mode))
++ err = ovl_set_mode(newpath.dentry, mode);
++ if (!err)
++ err = ovl_set_timestamps(newpath.dentry, stat);
++ mutex_unlock(&newpath.dentry->d_inode->i_mutex);
++ if (err)
++ goto err_remove;
++
++ ovl_dentry_update(dentry, newpath.dentry);
++
++ /*
++ * Easiest way to get rid of the lower dentry reference is to
++ * drop this dentry. This is neither needed nor possible for
++ * directories.
++ */
++ if (!S_ISDIR(stat->mode))
++ d_drop(dentry);
++
++ return 0;
++
++err_remove:
++ if (S_ISDIR(stat->mode))
++ vfs_rmdir(upperdir->d_inode, newpath.dentry);
++ else
++ vfs_unlink(upperdir->d_inode, newpath.dentry);
++
++ dput(newpath.dentry);
++
++ return err;
++}
++
++/*
++ * Copy up a single dentry
++ *
++ * Directory renames only allowed on "pure upper" (already created on
++ * upper filesystem, never copied up). Directories which are on lower or
++ * are merged may not be renamed. For these -EXDEV is returned and
++ * userspace has to deal with it. This means, when copying up a
++ * directory we can rely on it and ancestors being stable.
++ *
++ * Non-directory renames start with copy up of source if necessary. The
++ * actual rename will only proceed once the copy up was successful. Copy
++ * up uses upper parent i_mutex for exclusion. Since rename can change
++ * d_parent it is possible that the copy up will lock the old parent. At
++ * that point the file will have already been copied up anyway.
++ */
++static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
++ struct path *lowerpath, struct kstat *stat)
++{
++ int err;
++ struct kstat pstat;
++ struct path parentpath;
++ struct dentry *upperdir;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++ char *link = NULL;
++
++ ovl_path_upper(parent, &parentpath);
++ upperdir = parentpath.dentry;
++
++ err = vfs_getattr(parentpath.mnt, parentpath.dentry, &pstat);
++ if (err)
++ return err;
++
++ if (S_ISLNK(stat->mode)) {
++ link = ovl_read_symlink(lowerpath->dentry);
++ if (IS_ERR(link))
++ return PTR_ERR(link);
++ }
++
++ err = -ENOMEM;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ goto out_free_link;
++
++ override_cred->fsuid = stat->uid;
++ override_cred->fsgid = stat->gid;
++ /*
++ * CAP_SYS_ADMIN for copying up extended attributes
++ * CAP_DAC_OVERRIDE for create
++ * CAP_FOWNER for chmod, timestamp update
++ * CAP_FSETID for chmod
++ * CAP_MKNOD for mknod
++ */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++ cap_raise(override_cred->cap_effective, CAP_FOWNER);
++ cap_raise(override_cred->cap_effective, CAP_FSETID);
++ cap_raise(override_cred->cap_effective, CAP_MKNOD);
++ old_cred = override_creds(override_cred);
++
++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++ if (ovl_path_type(dentry) != OVL_PATH_LOWER) {
++ err = 0;
++ } else {
++ err = ovl_copy_up_locked(upperdir, dentry, lowerpath,
++ stat, link);
++ if (!err) {
++ /* Restore timestamps on parent (best effort) */
++ ovl_set_timestamps(upperdir, &pstat);
++ }
++ }
++
++ mutex_unlock(&upperdir->d_inode->i_mutex);
++
++ revert_creds(old_cred);
++ put_cred(override_cred);
++
++out_free_link:
++ if (link)
++ free_page((unsigned long) link);
++
++ return err;
++}
++
++int ovl_copy_up(struct dentry *dentry)
++{
++ int err;
++
++ err = 0;
++ while (!err) {
++ struct dentry *next;
++ struct dentry *parent;
++ struct path lowerpath;
++ struct kstat stat;
++ enum ovl_path_type type = ovl_path_type(dentry);
++
++ if (type != OVL_PATH_LOWER)
++ break;
++
++ next = dget(dentry);
++ /* find the topmost dentry not yet copied up */
++ for (;;) {
++ parent = dget_parent(next);
++
++ type = ovl_path_type(parent);
++ if (type != OVL_PATH_LOWER)
++ break;
++
++ dput(next);
++ next = parent;
++ }
++
++ ovl_path_lower(next, &lowerpath);
++ err = vfs_getattr(lowerpath.mnt, lowerpath.dentry, &stat);
++ if (!err)
++ err = ovl_copy_up_one(parent, next, &lowerpath, &stat);
++
++ dput(parent);
++ dput(next);
++ }
++
++ return err;
++}
++
++/* Optimize by not copying up the file first and truncating later */
++int ovl_copy_up_truncate(struct dentry *dentry, loff_t size)
++{
++ int err;
++ struct kstat stat;
++ struct path lowerpath;
++ struct dentry *parent = dget_parent(dentry);
++
++ err = ovl_copy_up(parent);
++ if (err)
++ goto out_dput_parent;
++
++ ovl_path_lower(dentry, &lowerpath);
++ err = vfs_getattr(lowerpath.mnt, lowerpath.dentry, &stat);
++ if (err)
++ goto out_dput_parent;
++
++ if (size < stat.size)
++ stat.size = size;
++
++ err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
++
++out_dput_parent:
++ dput(parent);
++ return err;
++}
+--- /dev/null
++++ b/fs/overlayfs/dir.c
+@@ -0,0 +1,596 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/xattr.h>
++#include <linux/security.h>
++#include "overlayfs.h"
++
++static const char *ovl_whiteout_symlink = "(overlay-whiteout)";
++
++static int ovl_whiteout(struct dentry *upperdir, struct dentry *dentry)
++{
++ int err;
++ struct dentry *newdentry;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ /* FIXME: recheck lower dentry to see if whiteout is really needed */
++
++ err = -ENOMEM;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ goto out;
++
++ /*
++ * CAP_SYS_ADMIN for setxattr
++ * CAP_DAC_OVERRIDE for symlink creation
++ * CAP_FOWNER for unlink in sticky directory
++ */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++ cap_raise(override_cred->cap_effective, CAP_FOWNER);
++ override_cred->fsuid = 0;
++ override_cred->fsgid = 0;
++ old_cred = override_creds(override_cred);
++
++ newdentry = lookup_one_len(dentry->d_name.name, upperdir,
++ dentry->d_name.len);
++ err = PTR_ERR(newdentry);
++ if (IS_ERR(newdentry))
++ goto out_put_cred;
++
++ /* Just been removed within the same locked region */
++ WARN_ON(newdentry->d_inode);
++
++ err = vfs_symlink(upperdir->d_inode, newdentry, ovl_whiteout_symlink);
++ if (err)
++ goto out_dput;
++
++ ovl_dentry_version_inc(dentry->d_parent);
++
++ err = vfs_setxattr(newdentry, ovl_whiteout_xattr, "y", 1, 0);
++ if (err)
++ vfs_unlink(upperdir->d_inode, newdentry);
++
++out_dput:
++ dput(newdentry);
++out_put_cred:
++ revert_creds(old_cred);
++ put_cred(override_cred);
++out:
++ if (err) {
++ /*
++ * There's no way to recover from failure to whiteout.
++ * What should we do? Log a big fat error and... ?
++ */
++ printk(KERN_ERR "overlayfs: ERROR - failed to whiteout '%s'\n",
++ dentry->d_name.name);
++ }
++
++ return err;
++}
++
++static struct dentry *ovl_lookup_create(struct dentry *upperdir,
++ struct dentry *template)
++{
++ int err;
++ struct dentry *newdentry;
++ struct qstr *name = &template->d_name;
++
++ newdentry = lookup_one_len(name->name, upperdir, name->len);
++ if (IS_ERR(newdentry))
++ return newdentry;
++
++ if (newdentry->d_inode) {
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ /* No need to check whiteout if lower parent is non-existent */
++ err = -EEXIST;
++ if (!ovl_dentry_lower(template->d_parent))
++ goto out_dput;
++
++ if (!S_ISLNK(newdentry->d_inode->i_mode))
++ goto out_dput;
++
++ err = -ENOMEM;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ goto out_dput;
++
++ /*
++ * CAP_SYS_ADMIN for getxattr
++ * CAP_FOWNER for unlink in sticky directory
++ */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ cap_raise(override_cred->cap_effective, CAP_FOWNER);
++ old_cred = override_creds(override_cred);
++
++ err = -EEXIST;
++ if (ovl_is_whiteout(newdentry))
++ err = vfs_unlink(upperdir->d_inode, newdentry);
++
++ revert_creds(old_cred);
++ put_cred(override_cred);
++ if (err)
++ goto out_dput;
++
++ dput(newdentry);
++ newdentry = lookup_one_len(name->name, upperdir, name->len);
++ if (IS_ERR(newdentry)) {
++ ovl_whiteout(upperdir, template);
++ return newdentry;
++ }
++
++ /*
++ * Whiteout just been successfully removed, parent
++ * i_mutex is still held, there's no way the lookup
++ * could return positive.
++ */
++ WARN_ON(newdentry->d_inode);
++ }
++
++ return newdentry;
++
++out_dput:
++ dput(newdentry);
++ return ERR_PTR(err);
++}
++
++struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry,
++ struct kstat *stat, const char *link)
++{
++ int err;
++ struct dentry *newdentry;
++ struct inode *dir = upperdir->d_inode;
++
++ newdentry = ovl_lookup_create(upperdir, dentry);
++ if (IS_ERR(newdentry))
++ goto out;
++
++ switch (stat->mode & S_IFMT) {
++ case S_IFREG:
++ err = vfs_create(dir, newdentry, stat->mode, NULL);
++ break;
++
++ case S_IFDIR:
++ err = vfs_mkdir(dir, newdentry, stat->mode);
++ break;
++
++ case S_IFCHR:
++ case S_IFBLK:
++ case S_IFIFO:
++ case S_IFSOCK:
++ err = vfs_mknod(dir, newdentry, stat->mode, stat->rdev);
++ break;
++
++ case S_IFLNK:
++ err = vfs_symlink(dir, newdentry, link);
++ break;
++
++ default:
++ err = -EPERM;
++ }
++ if (err) {
++ if (ovl_dentry_is_opaque(dentry))
++ ovl_whiteout(upperdir, dentry);
++ dput(newdentry);
++ newdentry = ERR_PTR(err);
++ } else if (WARN_ON(!newdentry->d_inode)) {
++ /*
++ * Not quite sure if non-instantiated dentry is legal or not.
++ * VFS doesn't seem to care so check and warn here.
++ */
++ dput(newdentry);
++ newdentry = ERR_PTR(-ENOENT);
++ }
++
++out:
++ return newdentry;
++
++}
++
++static int ovl_set_opaque(struct dentry *upperdentry)
++{
++ int err;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++
++ /* CAP_SYS_ADMIN for setxattr of "trusted" namespace */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ old_cred = override_creds(override_cred);
++ err = vfs_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0);
++ revert_creds(old_cred);
++ put_cred(override_cred);
++
++ return err;
++}
++
++static int ovl_remove_opaque(struct dentry *upperdentry)
++{
++ int err;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++
++ /* CAP_SYS_ADMIN for removexattr of "trusted" namespace */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ old_cred = override_creds(override_cred);
++ err = vfs_removexattr(upperdentry, ovl_opaque_xattr);
++ revert_creds(old_cred);
++ put_cred(override_cred);
++
++ return err;
++}
++
++static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
++ struct kstat *stat)
++{
++ int err;
++ enum ovl_path_type type;
++ struct path realpath;
++
++ type = ovl_path_real(dentry, &realpath);
++ err = vfs_getattr(realpath.mnt, realpath.dentry, stat);
++ if (err)
++ return err;
++
++ stat->dev = dentry->d_sb->s_dev;
++ stat->ino = dentry->d_inode->i_ino;
++
++ /*
++ * It's probably not worth it to count subdirs to get the
++ * correct link count. nlink=1 seems to pacify 'find' and
++ * other utilities.
++ */
++ if (type == OVL_PATH_MERGE)
++ stat->nlink = 1;
++
++ return 0;
++}
++
++static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
++ const char *link)
++{
++ int err;
++ struct dentry *newdentry;
++ struct dentry *upperdir;
++ struct inode *inode;
++ struct kstat stat = {
++ .mode = mode,
++ .rdev = rdev,
++ };
++
++ err = -ENOMEM;
++ inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata);
++ if (!inode)
++ goto out;
++
++ err = ovl_copy_up(dentry->d_parent);
++ if (err)
++ goto out_iput;
++
++ upperdir = ovl_dentry_upper(dentry->d_parent);
++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++
++ newdentry = ovl_upper_create(upperdir, dentry, &stat, link);
++ err = PTR_ERR(newdentry);
++ if (IS_ERR(newdentry))
++ goto out_unlock;
++
++ ovl_dentry_version_inc(dentry->d_parent);
++ if (ovl_dentry_is_opaque(dentry) && S_ISDIR(mode)) {
++ err = ovl_set_opaque(newdentry);
++ if (err) {
++ vfs_rmdir(upperdir->d_inode, newdentry);
++ ovl_whiteout(upperdir, dentry);
++ goto out_dput;
++ }
++ }
++ ovl_dentry_update(dentry, newdentry);
++ d_instantiate(dentry, inode);
++ inode = NULL;
++ newdentry = NULL;
++ err = 0;
++
++out_dput:
++ dput(newdentry);
++out_unlock:
++ mutex_unlock(&upperdir->d_inode->i_mutex);
++out_iput:
++ iput(inode);
++out:
++ return err;
++}
++
++static int ovl_create(struct inode *dir, struct dentry *dentry, umode_t mode,
++ struct nameidata *nd)
++{
++ return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL);
++}
++
++static int ovl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
++{
++ return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL);
++}
++
++static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
++ dev_t rdev)
++{
++ return ovl_create_object(dentry, mode, rdev, NULL);
++}
++
++static int ovl_symlink(struct inode *dir, struct dentry *dentry,
++ const char *link)
++{
++ return ovl_create_object(dentry, S_IFLNK, 0, link);
++}
++
++static int ovl_do_remove(struct dentry *dentry, bool is_dir)
++{
++ int err;
++ enum ovl_path_type type;
++ struct path realpath;
++ struct dentry *upperdir;
++
++ err = ovl_copy_up(dentry->d_parent);
++ if (err)
++ return err;
++
++ upperdir = ovl_dentry_upper(dentry->d_parent);
++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++ type = ovl_path_real(dentry, &realpath);
++ if (type != OVL_PATH_LOWER) {
++ err = -ESTALE;
++ if (realpath.dentry->d_parent != upperdir)
++ goto out_d_drop;
++
++ /* FIXME: create whiteout up front and rename to target */
++
++ if (is_dir)
++ err = vfs_rmdir(upperdir->d_inode, realpath.dentry);
++ else
++ err = vfs_unlink(upperdir->d_inode, realpath.dentry);
++ if (err)
++ goto out_d_drop;
++
++ ovl_dentry_version_inc(dentry->d_parent);
++ }
++
++ if (type != OVL_PATH_UPPER || ovl_dentry_is_opaque(dentry))
++ err = ovl_whiteout(upperdir, dentry);
++
++ /*
++ * Keeping this dentry hashed would mean having to release
++ * upperpath/lowerpath, which could only be done if we are the
++ * sole user of this dentry. Too tricky... Just unhash for
++ * now.
++ */
++out_d_drop:
++ d_drop(dentry);
++ mutex_unlock(&upperdir->d_inode->i_mutex);
++
++ return err;
++}
++
++static int ovl_unlink(struct inode *dir, struct dentry *dentry)
++{
++ return ovl_do_remove(dentry, false);
++}
++
++
++static int ovl_rmdir(struct inode *dir, struct dentry *dentry)
++{
++ int err;
++ enum ovl_path_type type;
++
++ type = ovl_path_type(dentry);
++ if (type != OVL_PATH_UPPER) {
++ err = ovl_check_empty_and_clear(dentry, type);
++ if (err)
++ return err;
++ }
++
++ return ovl_do_remove(dentry, true);
++}
++
++static int ovl_link(struct dentry *old, struct inode *newdir,
++ struct dentry *new)
++{
++ int err;
++ struct dentry *olddentry;
++ struct dentry *newdentry;
++ struct dentry *upperdir;
++
++ err = ovl_copy_up(old);
++ if (err)
++ goto out;
++
++ err = ovl_copy_up(new->d_parent);
++ if (err)
++ goto out;
++
++ upperdir = ovl_dentry_upper(new->d_parent);
++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++ newdentry = ovl_lookup_create(upperdir, new);
++ err = PTR_ERR(newdentry);
++ if (IS_ERR(newdentry))
++ goto out_unlock;
++
++ olddentry = ovl_dentry_upper(old);
++ err = vfs_link(olddentry, upperdir->d_inode, newdentry);
++ if (!err) {
++ if (WARN_ON(!newdentry->d_inode)) {
++ dput(newdentry);
++ err = -ENOENT;
++ goto out_unlock;
++ }
++
++ ovl_dentry_version_inc(new->d_parent);
++ ovl_dentry_update(new, newdentry);
++
++ ihold(old->d_inode);
++ d_instantiate(new, old->d_inode);
++ } else {
++ if (ovl_dentry_is_opaque(new))
++ ovl_whiteout(upperdir, new);
++ dput(newdentry);
++ }
++out_unlock:
++ mutex_unlock(&upperdir->d_inode->i_mutex);
++out:
++ return err;
++
++}
++
++static int ovl_rename(struct inode *olddir, struct dentry *old,
++ struct inode *newdir, struct dentry *new)
++{
++ int err;
++ enum ovl_path_type old_type;
++ enum ovl_path_type new_type;
++ struct dentry *old_upperdir;
++ struct dentry *new_upperdir;
++ struct dentry *olddentry;
++ struct dentry *newdentry;
++ struct dentry *trap;
++ bool old_opaque;
++ bool new_opaque;
++ bool new_create = false;
++ bool is_dir = S_ISDIR(old->d_inode->i_mode);
++
++ /* Don't copy up directory trees */
++ old_type = ovl_path_type(old);
++ if (old_type != OVL_PATH_UPPER && is_dir)
++ return -EXDEV;
++
++ if (new->d_inode) {
++ new_type = ovl_path_type(new);
++
++ if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) {
++ if (ovl_dentry_lower(old)->d_inode ==
++ ovl_dentry_lower(new)->d_inode)
++ return 0;
++ }
++ if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) {
++ if (ovl_dentry_upper(old)->d_inode ==
++ ovl_dentry_upper(new)->d_inode)
++ return 0;
++ }
++
++ if (new_type != OVL_PATH_UPPER &&
++ S_ISDIR(new->d_inode->i_mode)) {
++ err = ovl_check_empty_and_clear(new, new_type);
++ if (err)
++ return err;
++ }
++ } else {
++ new_type = OVL_PATH_UPPER;
++ }
++
++ err = ovl_copy_up(old);
++ if (err)
++ return err;
++
++ err = ovl_copy_up(new->d_parent);
++ if (err)
++ return err;
++
++ old_upperdir = ovl_dentry_upper(old->d_parent);
++ new_upperdir = ovl_dentry_upper(new->d_parent);
++
++ trap = lock_rename(new_upperdir, old_upperdir);
++
++ olddentry = ovl_dentry_upper(old);
++ newdentry = ovl_dentry_upper(new);
++ if (newdentry) {
++ dget(newdentry);
++ } else {
++ new_create = true;
++ newdentry = ovl_lookup_create(new_upperdir, new);
++ err = PTR_ERR(newdentry);
++ if (IS_ERR(newdentry))
++ goto out_unlock;
++ }
++
++ err = -ESTALE;
++ if (olddentry->d_parent != old_upperdir)
++ goto out_dput;
++ if (newdentry->d_parent != new_upperdir)
++ goto out_dput;
++ if (olddentry == trap)
++ goto out_dput;
++ if (newdentry == trap)
++ goto out_dput;
++
++ old_opaque = ovl_dentry_is_opaque(old);
++ new_opaque = ovl_dentry_is_opaque(new) || new_type != OVL_PATH_UPPER;
++
++ if (is_dir && !old_opaque && new_opaque) {
++ err = ovl_set_opaque(olddentry);
++ if (err)
++ goto out_dput;
++ }
++
++ err = vfs_rename(old_upperdir->d_inode, olddentry,
++ new_upperdir->d_inode, newdentry);
++
++ if (err) {
++ if (new_create && ovl_dentry_is_opaque(new))
++ ovl_whiteout(new_upperdir, new);
++ if (is_dir && !old_opaque && new_opaque)
++ ovl_remove_opaque(olddentry);
++ goto out_dput;
++ }
++
++ if (old_type != OVL_PATH_UPPER || old_opaque)
++ err = ovl_whiteout(old_upperdir, old);
++ if (is_dir && old_opaque && !new_opaque)
++ ovl_remove_opaque(olddentry);
++
++ if (old_opaque != new_opaque)
++ ovl_dentry_set_opaque(old, new_opaque);
++
++ ovl_dentry_version_inc(old->d_parent);
++ ovl_dentry_version_inc(new->d_parent);
++
++out_dput:
++ dput(newdentry);
++out_unlock:
++ unlock_rename(new_upperdir, old_upperdir);
++ return err;
++}
++
++const struct inode_operations ovl_dir_inode_operations = {
++ .lookup = ovl_lookup,
++ .mkdir = ovl_mkdir,
++ .symlink = ovl_symlink,
++ .unlink = ovl_unlink,
++ .rmdir = ovl_rmdir,
++ .rename = ovl_rename,
++ .link = ovl_link,
++ .setattr = ovl_setattr,
++ .create = ovl_create,
++ .mknod = ovl_mknod,
++ .permission = ovl_permission,
++ .getattr = ovl_dir_getattr,
++ .setxattr = ovl_setxattr,
++ .getxattr = ovl_getxattr,
++ .listxattr = ovl_listxattr,
++ .removexattr = ovl_removexattr,
++};
+--- /dev/null
++++ b/fs/overlayfs/inode.c
+@@ -0,0 +1,384 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/xattr.h>
++#include "overlayfs.h"
++
++int ovl_setattr(struct dentry *dentry, struct iattr *attr)
++{
++ struct dentry *upperdentry;
++ int err;
++
++ if ((attr->ia_valid & ATTR_SIZE) && !ovl_dentry_upper(dentry))
++ err = ovl_copy_up_truncate(dentry, attr->ia_size);
++ else
++ err = ovl_copy_up(dentry);
++ if (err)
++ return err;
++
++ upperdentry = ovl_dentry_upper(dentry);
++
++ if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
++ attr->ia_valid &= ~ATTR_MODE;
++
++ mutex_lock(&upperdentry->d_inode->i_mutex);
++ err = notify_change(upperdentry, attr);
++ mutex_unlock(&upperdentry->d_inode->i_mutex);
++
++ return err;
++}
++
++static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry,
++ struct kstat *stat)
++{
++ struct path realpath;
++
++ ovl_path_real(dentry, &realpath);
++ return vfs_getattr(realpath.mnt, realpath.dentry, stat);
++}
++
++int ovl_permission(struct inode *inode, int mask)
++{
++ struct ovl_entry *oe;
++ struct dentry *alias = NULL;
++ struct inode *realinode;
++ struct dentry *realdentry;
++ bool is_upper;
++ int err;
++
++ if (S_ISDIR(inode->i_mode)) {
++ oe = inode->i_private;
++ } else if (mask & MAY_NOT_BLOCK) {
++ return -ECHILD;
++ } else {
++ /*
++ * For non-directories find an alias and get the info
++ * from there.
++ */
++ spin_lock(&inode->i_lock);
++ if (WARN_ON(list_empty(&inode->i_dentry))) {
++ spin_unlock(&inode->i_lock);
++ return -ENOENT;
++ }
++ alias = list_entry(inode->i_dentry.next,
++ struct dentry, d_alias);
++ dget(alias);
++ spin_unlock(&inode->i_lock);
++ oe = alias->d_fsdata;
++ }
++
++ realdentry = ovl_entry_real(oe, &is_upper);
++
++ /* Careful in RCU walk mode */
++ realinode = ACCESS_ONCE(realdentry->d_inode);
++ if (!realinode) {
++ WARN_ON(!(mask & MAY_NOT_BLOCK));
++ err = -ENOENT;
++ goto out_dput;
++ }
++
++ if (mask & MAY_WRITE) {
++ umode_t mode = realinode->i_mode;
++
++ /*
++ * Writes will always be redirected to upper layer, so
++ * ignore lower layer being read-only.
++ *
++ * If the overlay itself is read-only then proceed
++		 * with the permission check; don't return EROFS.
++ * This will only happen if this is the lower layer of
++ * another overlayfs.
++ *
++ * If upper fs becomes read-only after the overlay was
++		 * constructed, return EROFS to prevent modification of
++ * upper layer.
++ */
++ err = -EROFS;
++ if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) &&
++ (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
++ goto out_dput;
++
++ /*
++ * Nobody gets write access to an immutable file.
++ */
++ err = -EACCES;
++ if (IS_IMMUTABLE(realinode))
++ goto out_dput;
++ }
++
++ if (realinode->i_op->permission)
++ err = realinode->i_op->permission(realinode, mask);
++ else
++ err = generic_permission(realinode, mask);
++out_dput:
++ dput(alias);
++ return err;
++}
++
++
++struct ovl_link_data {
++ struct dentry *realdentry;
++ void *cookie;
++};
++
++static void *ovl_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++ void *ret;
++ struct dentry *realdentry;
++ struct inode *realinode;
++
++ realdentry = ovl_dentry_real(dentry);
++ realinode = realdentry->d_inode;
++
++ if (WARN_ON(!realinode->i_op->follow_link))
++ return ERR_PTR(-EPERM);
++
++ ret = realinode->i_op->follow_link(realdentry, nd);
++ if (IS_ERR(ret))
++ return ret;
++
++ if (realinode->i_op->put_link) {
++ struct ovl_link_data *data;
++
++ data = kmalloc(sizeof(struct ovl_link_data), GFP_KERNEL);
++ if (!data) {
++ realinode->i_op->put_link(realdentry, nd, ret);
++ return ERR_PTR(-ENOMEM);
++ }
++ data->realdentry = realdentry;
++ data->cookie = ret;
++
++ return data;
++ } else {
++ return NULL;
++ }
++}
++
++static void ovl_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
++{
++ struct inode *realinode;
++ struct ovl_link_data *data = c;
++
++ if (!data)
++ return;
++
++ realinode = data->realdentry->d_inode;
++ realinode->i_op->put_link(data->realdentry, nd, data->cookie);
++ kfree(data);
++}
++
++static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
++{
++ struct path realpath;
++ struct inode *realinode;
++
++ ovl_path_real(dentry, &realpath);
++ realinode = realpath.dentry->d_inode;
++
++ if (!realinode->i_op->readlink)
++ return -EINVAL;
++
++ touch_atime(realpath.mnt, realpath.dentry);
++
++ return realinode->i_op->readlink(realpath.dentry, buf, bufsiz);
++}
++
++
++static bool ovl_is_private_xattr(const char *name)
++{
++	return strncmp(name, "trusted.overlay.", 16) == 0;
++}
++
++int ovl_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags)
++{
++ int err;
++ struct dentry *upperdentry;
++
++ if (ovl_is_private_xattr(name))
++ return -EPERM;
++
++ err = ovl_copy_up(dentry);
++ if (err)
++ return err;
++
++ upperdentry = ovl_dentry_upper(dentry);
++ return vfs_setxattr(upperdentry, name, value, size, flags);
++}
++
++ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
++ void *value, size_t size)
++{
++ if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
++ ovl_is_private_xattr(name))
++ return -ENODATA;
++
++ return vfs_getxattr(ovl_dentry_real(dentry), name, value, size);
++}
++
++ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
++{
++ ssize_t res;
++ int off;
++
++ res = vfs_listxattr(ovl_dentry_real(dentry), list, size);
++ if (res <= 0 || size == 0)
++ return res;
++
++ if (ovl_path_type(dentry->d_parent) != OVL_PATH_MERGE)
++ return res;
++
++ /* filter out private xattrs */
++ for (off = 0; off < res;) {
++ char *s = list + off;
++ size_t slen = strlen(s) + 1;
++
++ BUG_ON(off + slen > res);
++
++ if (ovl_is_private_xattr(s)) {
++ res -= slen;
++ memmove(s, s + slen, res - off);
++ } else {
++ off += slen;
++ }
++ }
++
++ return res;
++}
++
++int ovl_removexattr(struct dentry *dentry, const char *name)
++{
++ int err;
++ struct path realpath;
++ enum ovl_path_type type;
++
++ if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
++ ovl_is_private_xattr(name))
++ return -ENODATA;
++
++ type = ovl_path_real(dentry, &realpath);
++ if (type == OVL_PATH_LOWER) {
++ err = vfs_getxattr(realpath.dentry, name, NULL, 0);
++ if (err < 0)
++ return err;
++
++ err = ovl_copy_up(dentry);
++ if (err)
++ return err;
++
++ ovl_path_upper(dentry, &realpath);
++ }
++
++ return vfs_removexattr(realpath.dentry, name);
++}
++
++static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
++ struct dentry *realdentry)
++{
++ if (type != OVL_PATH_LOWER)
++ return false;
++
++ if (special_file(realdentry->d_inode->i_mode))
++ return false;
++
++ if (!(OPEN_FMODE(flags) & FMODE_WRITE) && !(flags & O_TRUNC))
++ return false;
++
++ return true;
++}
++
++static struct file *ovl_open(struct dentry *dentry, struct file *file,
++ const struct cred *cred)
++{
++ int err;
++ struct path realpath;
++ enum ovl_path_type type;
++
++ type = ovl_path_real(dentry, &realpath);
++ if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) {
++ if (file->f_flags & O_TRUNC)
++ err = ovl_copy_up_truncate(dentry, 0);
++ else
++ err = ovl_copy_up(dentry);
++ if (err)
++ return ERR_PTR(err);
++
++ ovl_path_upper(dentry, &realpath);
++ }
++
++ return vfs_open(&realpath, file, cred);
++}
++
++static const struct inode_operations ovl_file_inode_operations = {
++ .setattr = ovl_setattr,
++ .permission = ovl_permission,
++ .getattr = ovl_getattr,
++ .setxattr = ovl_setxattr,
++ .getxattr = ovl_getxattr,
++ .listxattr = ovl_listxattr,
++ .removexattr = ovl_removexattr,
++ .open = ovl_open,
++};
++
++static const struct inode_operations ovl_symlink_inode_operations = {
++ .setattr = ovl_setattr,
++ .follow_link = ovl_follow_link,
++ .put_link = ovl_put_link,
++ .readlink = ovl_readlink,
++ .getattr = ovl_getattr,
++ .setxattr = ovl_setxattr,
++ .getxattr = ovl_getxattr,
++ .listxattr = ovl_listxattr,
++ .removexattr = ovl_removexattr,
++};
++
++struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
++ struct ovl_entry *oe)
++{
++ struct inode *inode;
++
++ inode = new_inode(sb);
++ if (!inode)
++ return NULL;
++
++ mode &= S_IFMT;
++
++ inode->i_ino = get_next_ino();
++ inode->i_mode = mode;
++ inode->i_flags |= S_NOATIME | S_NOCMTIME;
++
++ switch (mode) {
++ case S_IFDIR:
++ inode->i_private = oe;
++ inode->i_op = &ovl_dir_inode_operations;
++ inode->i_fop = &ovl_dir_operations;
++ break;
++
++ case S_IFLNK:
++ inode->i_op = &ovl_symlink_inode_operations;
++ break;
++
++ case S_IFREG:
++ case S_IFSOCK:
++ case S_IFBLK:
++ case S_IFCHR:
++ case S_IFIFO:
++ inode->i_op = &ovl_file_inode_operations;
++ break;
++
++ default:
++ WARN(1, "illegal file type: %i\n", mode);
++ inode = NULL;
++ }
++
++ return inode;
++
++}
+--- /dev/null
++++ b/fs/overlayfs/overlayfs.h
+@@ -0,0 +1,64 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++struct ovl_entry;
++
++enum ovl_path_type {
++ OVL_PATH_UPPER,
++ OVL_PATH_MERGE,
++ OVL_PATH_LOWER,
++};
++
++extern const char *ovl_opaque_xattr;
++extern const char *ovl_whiteout_xattr;
++extern const struct dentry_operations ovl_dentry_operations;
++
++enum ovl_path_type ovl_path_type(struct dentry *dentry);
++u64 ovl_dentry_version_get(struct dentry *dentry);
++void ovl_dentry_version_inc(struct dentry *dentry);
++void ovl_path_upper(struct dentry *dentry, struct path *path);
++void ovl_path_lower(struct dentry *dentry, struct path *path);
++enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
++struct dentry *ovl_dentry_upper(struct dentry *dentry);
++struct dentry *ovl_dentry_lower(struct dentry *dentry);
++struct dentry *ovl_dentry_real(struct dentry *dentry);
++struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper);
++bool ovl_dentry_is_opaque(struct dentry *dentry);
++void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
++bool ovl_is_whiteout(struct dentry *dentry);
++void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
++struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
++ struct nameidata *nd);
++struct file *ovl_path_open(struct path *path, int flags);
++
++struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry,
++ struct kstat *stat, const char *link);
++
++/* readdir.c */
++extern const struct file_operations ovl_dir_operations;
++int ovl_check_empty_and_clear(struct dentry *dentry, enum ovl_path_type type);
++
++/* inode.c */
++int ovl_setattr(struct dentry *dentry, struct iattr *attr);
++int ovl_permission(struct inode *inode, int mask);
++int ovl_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags);
++ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
++ void *value, size_t size);
++ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
++int ovl_removexattr(struct dentry *dentry, const char *name);
++
++struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
++ struct ovl_entry *oe);
++/* dir.c */
++extern const struct inode_operations ovl_dir_inode_operations;
++
++/* copy_up.c */
++int ovl_copy_up(struct dentry *dentry);
++int ovl_copy_up_truncate(struct dentry *dentry, loff_t size);
+--- /dev/null
++++ b/fs/overlayfs/readdir.c
+@@ -0,0 +1,565 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/namei.h>
++#include <linux/file.h>
++#include <linux/xattr.h>
++#include <linux/rbtree.h>
++#include <linux/security.h>
++#include "overlayfs.h"
++
++struct ovl_cache_entry {
++ const char *name;
++ unsigned int len;
++ unsigned int type;
++ u64 ino;
++ bool is_whiteout;
++ struct list_head l_node;
++ struct rb_node node;
++};
++
++struct ovl_readdir_data {
++ struct rb_root *root;
++ struct list_head *list;
++ struct list_head *middle;
++ struct dentry *dir;
++ int count;
++ int err;
++};
++
++struct ovl_dir_file {
++ bool is_real;
++ bool is_cached;
++ struct list_head cursor;
++ u64 cache_version;
++ struct list_head cache;
++ struct file *realfile;
++};
++
++static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
++{
++ return container_of(n, struct ovl_cache_entry, node);
++}
++
++static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
++ const char *name, int len)
++{
++ struct rb_node *node = root->rb_node;
++ int cmp;
++
++ while (node) {
++ struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);
++
++ cmp = strncmp(name, p->name, len);
++ if (cmp > 0)
++ node = p->node.rb_right;
++ else if (cmp < 0 || len < p->len)
++ node = p->node.rb_left;
++ else
++ return p;
++ }
++
++ return NULL;
++}
++
++static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
++ u64 ino, unsigned int d_type)
++{
++ struct ovl_cache_entry *p;
++
++ p = kmalloc(sizeof(*p) + len + 1, GFP_KERNEL);
++ if (p) {
++ char *name_copy = (char *) (p + 1);
++ memcpy(name_copy, name, len);
++ name_copy[len] = '\0';
++ p->name = name_copy;
++ p->len = len;
++ p->type = d_type;
++ p->ino = ino;
++ p->is_whiteout = false;
++ }
++
++ return p;
++}
++
++static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
++ const char *name, int len, u64 ino,
++ unsigned int d_type)
++{
++ struct rb_node **newp = &rdd->root->rb_node;
++ struct rb_node *parent = NULL;
++ struct ovl_cache_entry *p;
++
++ while (*newp) {
++ int cmp;
++ struct ovl_cache_entry *tmp;
++
++ parent = *newp;
++ tmp = ovl_cache_entry_from_node(*newp);
++ cmp = strncmp(name, tmp->name, len);
++ if (cmp > 0)
++ newp = &tmp->node.rb_right;
++ else if (cmp < 0 || len < tmp->len)
++ newp = &tmp->node.rb_left;
++ else
++ return 0;
++ }
++
++ p = ovl_cache_entry_new(name, len, ino, d_type);
++ if (p == NULL)
++ return -ENOMEM;
++
++ list_add_tail(&p->l_node, rdd->list);
++ rb_link_node(&p->node, parent, newp);
++ rb_insert_color(&p->node, rdd->root);
++
++ return 0;
++}
++
++static int ovl_fill_lower(void *buf, const char *name, int namelen,
++ loff_t offset, u64 ino, unsigned int d_type)
++{
++ struct ovl_readdir_data *rdd = buf;
++ struct ovl_cache_entry *p;
++
++ rdd->count++;
++ p = ovl_cache_entry_find(rdd->root, name, namelen);
++ if (p) {
++ list_move_tail(&p->l_node, rdd->middle);
++ } else {
++ p = ovl_cache_entry_new(name, namelen, ino, d_type);
++ if (p == NULL)
++ rdd->err = -ENOMEM;
++ else
++ list_add_tail(&p->l_node, rdd->middle);
++ }
++
++ return rdd->err;
++}
++
++static void ovl_cache_free(struct list_head *list)
++{
++ struct ovl_cache_entry *p;
++ struct ovl_cache_entry *n;
++
++ list_for_each_entry_safe(p, n, list, l_node)
++ kfree(p);
++
++ INIT_LIST_HEAD(list);
++}
++
++static int ovl_fill_upper(void *buf, const char *name, int namelen,
++ loff_t offset, u64 ino, unsigned int d_type)
++{
++ struct ovl_readdir_data *rdd = buf;
++
++ rdd->count++;
++ return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
++}
++
++static inline int ovl_dir_read(struct path *realpath,
++ struct ovl_readdir_data *rdd, filldir_t filler)
++{
++ struct file *realfile;
++ int err;
++
++ realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
++ if (IS_ERR(realfile))
++ return PTR_ERR(realfile);
++
++ do {
++ rdd->count = 0;
++ rdd->err = 0;
++ err = vfs_readdir(realfile, filler, rdd);
++ if (err >= 0)
++ err = rdd->err;
++ } while (!err && rdd->count);
++ fput(realfile);
++
++ return 0;
++}
++
++static void ovl_dir_reset(struct file *file)
++{
++ struct ovl_dir_file *od = file->private_data;
++ enum ovl_path_type type = ovl_path_type(file->f_path.dentry);
++
++ if (ovl_dentry_version_get(file->f_path.dentry) != od->cache_version) {
++ list_del_init(&od->cursor);
++ ovl_cache_free(&od->cache);
++ od->is_cached = false;
++ }
++ WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
++ if (od->is_real && type == OVL_PATH_MERGE) {
++ fput(od->realfile);
++ od->realfile = NULL;
++ od->is_real = false;
++ }
++}
++
++static int ovl_dir_mark_whiteouts(struct ovl_readdir_data *rdd)
++{
++ struct ovl_cache_entry *p;
++ struct dentry *dentry;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ override_cred = prepare_creds();
++ if (!override_cred) {
++ ovl_cache_free(rdd->list);
++ return -ENOMEM;
++ }
++
++ /*
++ * CAP_SYS_ADMIN for getxattr
++ * CAP_DAC_OVERRIDE for lookup
++ */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++ old_cred = override_creds(override_cred);
++
++ mutex_lock(&rdd->dir->d_inode->i_mutex);
++ list_for_each_entry(p, rdd->list, l_node) {
++ if (p->type != DT_LNK)
++ continue;
++
++ dentry = lookup_one_len(p->name, rdd->dir, p->len);
++ if (IS_ERR(dentry))
++ continue;
++
++ p->is_whiteout = ovl_is_whiteout(dentry);
++ dput(dentry);
++ }
++ mutex_unlock(&rdd->dir->d_inode->i_mutex);
++
++ revert_creds(old_cred);
++ put_cred(override_cred);
++
++ return 0;
++}
++
++static inline int ovl_dir_read_merged(struct path *upperpath,
++ struct path *lowerpath,
++ struct ovl_readdir_data *rdd)
++{
++ int err;
++ struct rb_root root = RB_ROOT;
++ struct list_head middle;
++
++ rdd->root = &root;
++ if (upperpath->dentry) {
++ rdd->dir = upperpath->dentry;
++ err = ovl_dir_read(upperpath, rdd, ovl_fill_upper);
++ if (err)
++ goto out;
++
++ err = ovl_dir_mark_whiteouts(rdd);
++ if (err)
++ goto out;
++ }
++ /*
++	 * Insert lowerpath entries before upperpath ones; this allows
++	 * offsets to be reasonably constant.
++ */
++ list_add(&middle, rdd->list);
++ rdd->middle = &middle;
++ err = ovl_dir_read(lowerpath, rdd, ovl_fill_lower);
++ list_del(&middle);
++out:
++ rdd->root = NULL;
++
++ return err;
++}
++
++static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
++{
++ struct list_head *l;
++ loff_t off;
++
++ l = od->cache.next;
++ for (off = 0; off < pos; off++) {
++ if (l == &od->cache)
++ break;
++ l = l->next;
++ }
++ list_move_tail(&od->cursor, l);
++}
++
++static int ovl_readdir(struct file *file, void *buf, filldir_t filler)
++{
++ struct ovl_dir_file *od = file->private_data;
++ int res;
++
++ if (!file->f_pos)
++ ovl_dir_reset(file);
++
++ if (od->is_real) {
++ res = vfs_readdir(od->realfile, filler, buf);
++ file->f_pos = od->realfile->f_pos;
++
++ return res;
++ }
++
++ if (!od->is_cached) {
++ struct path lowerpath;
++ struct path upperpath;
++ struct ovl_readdir_data rdd = { .list = &od->cache };
++
++ ovl_path_lower(file->f_path.dentry, &lowerpath);
++ ovl_path_upper(file->f_path.dentry, &upperpath);
++
++ res = ovl_dir_read_merged(&upperpath, &lowerpath, &rdd);
++ if (res) {
++ ovl_cache_free(rdd.list);
++ return res;
++ }
++
++ od->cache_version = ovl_dentry_version_get(file->f_path.dentry);
++ od->is_cached = true;
++
++ ovl_seek_cursor(od, file->f_pos);
++ }
++
++ while (od->cursor.next != &od->cache) {
++ int over;
++ loff_t off;
++ struct ovl_cache_entry *p;
++
++ p = list_entry(od->cursor.next, struct ovl_cache_entry, l_node);
++ off = file->f_pos;
++ if (!p->is_whiteout) {
++ over = filler(buf, p->name, p->len, off, p->ino,
++ p->type);
++ if (over)
++ break;
++ }
++ file->f_pos++;
++ list_move(&od->cursor, &p->l_node);
++ }
++
++ return 0;
++}
++
++static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++ loff_t res;
++ struct ovl_dir_file *od = file->private_data;
++
++ mutex_lock(&file->f_dentry->d_inode->i_mutex);
++ if (!file->f_pos)
++ ovl_dir_reset(file);
++
++ if (od->is_real) {
++ res = vfs_llseek(od->realfile, offset, origin);
++ file->f_pos = od->realfile->f_pos;
++ } else {
++ res = -EINVAL;
++
++ switch (origin) {
++ case SEEK_CUR:
++ offset += file->f_pos;
++ break;
++ case SEEK_SET:
++ break;
++ default:
++ goto out_unlock;
++ }
++ if (offset < 0)
++ goto out_unlock;
++
++ if (offset != file->f_pos) {
++ file->f_pos = offset;
++ if (od->is_cached)
++ ovl_seek_cursor(od, offset);
++ }
++ res = offset;
++ }
++out_unlock:
++ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
++
++ return res;
++}
++
++static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
++ int datasync)
++{
++ struct ovl_dir_file *od = file->private_data;
++
++ /* May need to reopen directory if it got copied up */
++ if (!od->realfile) {
++ struct path upperpath;
++
++ ovl_path_upper(file->f_path.dentry, &upperpath);
++ od->realfile = ovl_path_open(&upperpath, O_RDONLY);
++ if (IS_ERR(od->realfile))
++ return PTR_ERR(od->realfile);
++ }
++
++ return vfs_fsync_range(od->realfile, start, end, datasync);
++}
++
++static int ovl_dir_release(struct inode *inode, struct file *file)
++{
++ struct ovl_dir_file *od = file->private_data;
++
++ list_del(&od->cursor);
++ ovl_cache_free(&od->cache);
++ if (od->realfile)
++ fput(od->realfile);
++ kfree(od);
++
++ return 0;
++}
++
++static int ovl_dir_open(struct inode *inode, struct file *file)
++{
++ struct path realpath;
++ struct file *realfile;
++ struct ovl_dir_file *od;
++ enum ovl_path_type type;
++
++ od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
++ if (!od)
++ return -ENOMEM;
++
++ type = ovl_path_real(file->f_path.dentry, &realpath);
++ realfile = ovl_path_open(&realpath, file->f_flags);
++ if (IS_ERR(realfile)) {
++ kfree(od);
++ return PTR_ERR(realfile);
++ }
++ INIT_LIST_HEAD(&od->cache);
++ INIT_LIST_HEAD(&od->cursor);
++ od->is_cached = false;
++ od->realfile = realfile;
++ od->is_real = (type != OVL_PATH_MERGE);
++ file->private_data = od;
++
++ return 0;
++}
++
++const struct file_operations ovl_dir_operations = {
++ .read = generic_read_dir,
++ .open = ovl_dir_open,
++ .readdir = ovl_readdir,
++ .llseek = ovl_dir_llseek,
++ .fsync = ovl_dir_fsync,
++ .release = ovl_dir_release,
++};
++
++static int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
++{
++ int err;
++ struct path lowerpath;
++ struct path upperpath;
++ struct ovl_cache_entry *p;
++ struct ovl_readdir_data rdd = { .list = list };
++
++ ovl_path_upper(dentry, &upperpath);
++ ovl_path_lower(dentry, &lowerpath);
++
++ err = ovl_dir_read_merged(&upperpath, &lowerpath, &rdd);
++ if (err)
++ return err;
++
++ err = 0;
++
++ list_for_each_entry(p, list, l_node) {
++ if (p->is_whiteout)
++ continue;
++
++ if (p->name[0] == '.') {
++ if (p->len == 1)
++ continue;
++ if (p->len == 2 && p->name[1] == '.')
++ continue;
++ }
++ err = -ENOTEMPTY;
++ break;
++ }
++
++ return err;
++}
++
++static int ovl_remove_whiteouts(struct dentry *dir, struct list_head *list)
++{
++ struct path upperpath;
++ struct dentry *upperdir;
++ struct ovl_cache_entry *p;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++ int err;
++
++ ovl_path_upper(dir, &upperpath);
++ upperdir = upperpath.dentry;
++
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++
++ /*
++ * CAP_DAC_OVERRIDE for lookup and unlink
++ * CAP_SYS_ADMIN for setxattr of "trusted" namespace
++ * CAP_FOWNER for unlink in sticky directory
++ */
++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ cap_raise(override_cred->cap_effective, CAP_FOWNER);
++ old_cred = override_creds(override_cred);
++
++ err = vfs_setxattr(upperdir, ovl_opaque_xattr, "y", 1, 0);
++ if (err)
++ goto out_revert_creds;
++
++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++ list_for_each_entry(p, list, l_node) {
++ struct dentry *dentry;
++ int ret;
++
++ if (!p->is_whiteout)
++ continue;
++
++ dentry = lookup_one_len(p->name, upperdir, p->len);
++ if (IS_ERR(dentry)) {
++ printk(KERN_WARNING
++ "overlayfs: failed to lookup whiteout %.*s: %li\n",
++ p->len, p->name, PTR_ERR(dentry));
++ continue;
++ }
++ ret = vfs_unlink(upperdir->d_inode, dentry);
++ dput(dentry);
++ if (ret)
++ printk(KERN_WARNING
++ "overlayfs: failed to unlink whiteout %.*s: %i\n",
++ p->len, p->name, ret);
++ }
++ mutex_unlock(&upperdir->d_inode->i_mutex);
++
++out_revert_creds:
++ revert_creds(old_cred);
++ put_cred(override_cred);
++
++ return err;
++}
++
++int ovl_check_empty_and_clear(struct dentry *dentry, enum ovl_path_type type)
++{
++ int err;
++ LIST_HEAD(list);
++
++ err = ovl_check_empty_dir(dentry, &list);
++ if (!err && type == OVL_PATH_MERGE)
++ err = ovl_remove_whiteouts(dentry, &list);
++
++ ovl_cache_free(&list);
++
++ return err;
++}
+--- /dev/null
++++ b/fs/overlayfs/super.c
+@@ -0,0 +1,664 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/xattr.h>
++#include <linux/security.h>
++#include <linux/mount.h>
++#include <linux/slab.h>
++#include <linux/parser.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include "overlayfs.h"
++
++MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
++MODULE_DESCRIPTION("Overlay filesystem");
++MODULE_LICENSE("GPL");
++
++struct ovl_config {
++ char *lowerdir;
++ char *upperdir;
++};
++
++/* private information held for overlayfs's superblock */
++struct ovl_fs {
++ struct vfsmount *upper_mnt;
++ struct vfsmount *lower_mnt;
++ /* pathnames of lower and upper dirs, for show_options */
++ struct ovl_config config;
++};
++
++/* private information held for every overlayfs dentry */
++struct ovl_entry {
++ /*
++ * Keep "double reference" on upper dentries, so that
++ * d_delete() doesn't think it's OK to reset d_inode to NULL.
++ */
++ struct dentry *__upperdentry;
++ struct dentry *lowerdentry;
++ union {
++ struct {
++ u64 version;
++ bool opaque;
++ };
++ struct rcu_head rcu;
++ };
++};
++
++const char *ovl_whiteout_xattr = "trusted.overlay.whiteout";
++const char *ovl_opaque_xattr = "trusted.overlay.opaque";
++
++
++enum ovl_path_type ovl_path_type(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ if (oe->__upperdentry) {
++ if (oe->lowerdentry && S_ISDIR(dentry->d_inode->i_mode))
++ return OVL_PATH_MERGE;
++ else
++ return OVL_PATH_UPPER;
++ } else {
++ return OVL_PATH_LOWER;
++ }
++}
++
++static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
++{
++ struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry);
++ smp_read_barrier_depends();
++ return upperdentry;
++}
++
++void ovl_path_upper(struct dentry *dentry, struct path *path)
++{
++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ path->mnt = ofs->upper_mnt;
++ path->dentry = ovl_upperdentry_dereference(oe);
++}
++
++void ovl_path_lower(struct dentry *dentry, struct path *path)
++{
++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ path->mnt = ofs->lower_mnt;
++ path->dentry = oe->lowerdentry;
++}
++
++enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
++{
++
++ enum ovl_path_type type = ovl_path_type(dentry);
++
++ if (type == OVL_PATH_LOWER)
++ ovl_path_lower(dentry, path);
++ else
++ ovl_path_upper(dentry, path);
++
++ return type;
++}
++
++struct dentry *ovl_dentry_upper(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ return ovl_upperdentry_dereference(oe);
++}
++
++struct dentry *ovl_dentry_lower(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ return oe->lowerdentry;
++}
++
++struct dentry *ovl_dentry_real(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++ struct dentry *realdentry;
++
++ realdentry = ovl_upperdentry_dereference(oe);
++ if (!realdentry)
++ realdentry = oe->lowerdentry;
++
++ return realdentry;
++}
++
++struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper)
++{
++ struct dentry *realdentry;
++
++ realdentry = ovl_upperdentry_dereference(oe);
++ if (realdentry) {
++ *is_upper = true;
++ } else {
++ realdentry = oe->lowerdentry;
++ *is_upper = false;
++ }
++ return realdentry;
++}
++
++bool ovl_dentry_is_opaque(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++ return oe->opaque;
++}
++
++void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++ oe->opaque = opaque;
++}
++
++void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ WARN_ON(!mutex_is_locked(&upperdentry->d_parent->d_inode->i_mutex));
++ WARN_ON(oe->__upperdentry);
++ BUG_ON(!upperdentry->d_inode);
++ smp_wmb();
++ oe->__upperdentry = dget(upperdentry);
++}
++
++void ovl_dentry_version_inc(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
++ oe->version++;
++}
++
++u64 ovl_dentry_version_get(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
++ return oe->version;
++}
++
++bool ovl_is_whiteout(struct dentry *dentry)
++{
++ int res;
++ char val;
++
++ if (!dentry)
++ return false;
++ if (!dentry->d_inode)
++ return false;
++ if (!S_ISLNK(dentry->d_inode->i_mode))
++ return false;
++
++ res = vfs_getxattr(dentry, ovl_whiteout_xattr, &val, 1);
++ if (res == 1 && val == 'y')
++ return true;
++
++ return false;
++}
++
++static bool ovl_is_opaquedir(struct dentry *dentry)
++{
++ int res;
++ char val;
++
++ if (!S_ISDIR(dentry->d_inode->i_mode))
++ return false;
++
++ res = vfs_getxattr(dentry, ovl_opaque_xattr, &val, 1);
++ if (res == 1 && val == 'y')
++ return true;
++
++ return false;
++}
++
++static void ovl_entry_free(struct rcu_head *head)
++{
++ struct ovl_entry *oe = container_of(head, struct ovl_entry, rcu);
++ kfree(oe);
++}
++
++static void ovl_dentry_release(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ if (oe) {
++ dput(oe->__upperdentry);
++ dput(oe->__upperdentry);
++ dput(oe->lowerdentry);
++ call_rcu(&oe->rcu, ovl_entry_free);
++ }
++}
++
++const struct dentry_operations ovl_dentry_operations = {
++ .d_release = ovl_dentry_release,
++};
++
++static struct ovl_entry *ovl_alloc_entry(void)
++{
++ return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL);
++}
++
++static inline struct dentry *ovl_lookup_real(struct dentry *dir,
++ struct qstr *name)
++{
++ struct dentry *dentry;
++
++ mutex_lock(&dir->d_inode->i_mutex);
++ dentry = lookup_one_len(name->name, dir, name->len);
++ mutex_unlock(&dir->d_inode->i_mutex);
++
++ if (IS_ERR(dentry)) {
++ if (PTR_ERR(dentry) == -ENOENT)
++ dentry = NULL;
++ } else if (!dentry->d_inode) {
++ dput(dentry);
++ dentry = NULL;
++ }
++ return dentry;
++}
++
++static int ovl_do_lookup(struct dentry *dentry)
++{
++ struct ovl_entry *oe;
++ struct dentry *upperdir;
++ struct dentry *lowerdir;
++ struct dentry *upperdentry = NULL;
++ struct dentry *lowerdentry = NULL;
++ struct inode *inode = NULL;
++ int err;
++
++ err = -ENOMEM;
++ oe = ovl_alloc_entry();
++ if (!oe)
++ goto out;
++
++ upperdir = ovl_dentry_upper(dentry->d_parent);
++ lowerdir = ovl_dentry_lower(dentry->d_parent);
++
++ if (upperdir) {
++ upperdentry = ovl_lookup_real(upperdir, &dentry->d_name);
++ err = PTR_ERR(upperdentry);
++ if (IS_ERR(upperdentry))
++ goto out_put_dir;
++
++ if (lowerdir && upperdentry &&
++ (S_ISLNK(upperdentry->d_inode->i_mode) ||
++ S_ISDIR(upperdentry->d_inode->i_mode))) {
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ err = -ENOMEM;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ goto out_dput_upper;
++
++ /* CAP_SYS_ADMIN needed for getxattr */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ old_cred = override_creds(override_cred);
++
++ if (ovl_is_opaquedir(upperdentry)) {
++ oe->opaque = true;
++ } else if (ovl_is_whiteout(upperdentry)) {
++ dput(upperdentry);
++ upperdentry = NULL;
++ oe->opaque = true;
++ }
++ revert_creds(old_cred);
++ put_cred(override_cred);
++ }
++ }
++ if (lowerdir && !oe->opaque) {
++ lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name);
++ err = PTR_ERR(lowerdentry);
++ if (IS_ERR(lowerdentry))
++ goto out_dput_upper;
++ }
++
++ if (lowerdentry && upperdentry &&
++ (!S_ISDIR(upperdentry->d_inode->i_mode) ||
++ !S_ISDIR(lowerdentry->d_inode->i_mode))) {
++ dput(lowerdentry);
++ lowerdentry = NULL;
++ oe->opaque = true;
++ }
++
++ if (lowerdentry || upperdentry) {
++ struct dentry *realdentry;
++
++ realdentry = upperdentry ? upperdentry : lowerdentry;
++ err = -ENOMEM;
++ inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode,
++ oe);
++ if (!inode)
++ goto out_dput;
++ }
++
++ if (upperdentry)
++ oe->__upperdentry = dget(upperdentry);
++
++ if (lowerdentry)
++ oe->lowerdentry = lowerdentry;
++
++ dentry->d_fsdata = oe;
++ dentry->d_op = &ovl_dentry_operations;
++ d_add(dentry, inode);
++
++ return 0;
++
++out_dput:
++ dput(lowerdentry);
++out_dput_upper:
++ dput(upperdentry);
++out_put_dir:
++ kfree(oe);
++out:
++ return err;
++}
++
++struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
++ struct nameidata *nd)
++{
++ int err = ovl_do_lookup(dentry);
++
++ if (err)
++ return ERR_PTR(err);
++
++ return NULL;
++}
++
++struct file *ovl_path_open(struct path *path, int flags)
++{
++ path_get(path);
++ return dentry_open(path->dentry, path->mnt, flags, current_cred());
++}
++
++static void ovl_put_super(struct super_block *sb)
++{
++ struct ovl_fs *ufs = sb->s_fs_info;
++
++ if (!(sb->s_flags & MS_RDONLY))
++ mnt_drop_write(ufs->upper_mnt);
++
++ mntput(ufs->upper_mnt);
++ mntput(ufs->lower_mnt);
++
++ kfree(ufs->config.lowerdir);
++ kfree(ufs->config.upperdir);
++ kfree(ufs);
++}
++
++static int ovl_remount_fs(struct super_block *sb, int *flagsp, char *data)
++{
++ int flags = *flagsp;
++ struct ovl_fs *ufs = sb->s_fs_info;
++
++ /* When remounting rw or ro, we need to adjust the write access to the
++ * upper fs.
++ */
++ if (((flags ^ sb->s_flags) & MS_RDONLY) == 0)
++ /* No change to readonly status */
++ return 0;
++
++ if (flags & MS_RDONLY) {
++ mnt_drop_write(ufs->upper_mnt);
++ return 0;
++ } else
++ return mnt_want_write(ufs->upper_mnt);
++}
++
++/**
++ * ovl_statfs
++ * @dentry: The root dentry of the overlayfs mount
++ * @buf: The struct kstatfs to fill in with stats
++ *
++ * Get the filesystem statistics. As writes always target the upper layer
++ * filesystem, pass the statfs to the same filesystem.
++ */
++static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++ struct dentry *root_dentry = dentry->d_sb->s_root;
++ struct path path;
++ ovl_path_upper(root_dentry, &path);
++
++ if (!path.dentry->d_sb->s_op->statfs)
++ return -ENOSYS;
++ return path.dentry->d_sb->s_op->statfs(path.dentry, buf);
++}
++
++/**
++ * ovl_show_options
++ *
++ * Prints the mount options for a given superblock.
++ * Returns zero; does not fail.
++ */
++static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
++{
++ struct super_block *sb = dentry->d_sb;
++ struct ovl_fs *ufs = sb->s_fs_info;
++
++ seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
++ seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
++ return 0;
++}
++
++static const struct super_operations ovl_super_operations = {
++ .put_super = ovl_put_super,
++ .remount_fs = ovl_remount_fs,
++ .statfs = ovl_statfs,
++ .show_options = ovl_show_options,
++};
++
++enum {
++ Opt_lowerdir,
++ Opt_upperdir,
++ Opt_err,
++};
++
++static const match_table_t ovl_tokens = {
++ {Opt_lowerdir, "lowerdir=%s"},
++ {Opt_upperdir, "upperdir=%s"},
++ {Opt_err, NULL}
++};
++
++static int ovl_parse_opt(char *opt, struct ovl_config *config)
++{
++ char *p;
++
++ config->upperdir = NULL;
++ config->lowerdir = NULL;
++
++ while ((p = strsep(&opt, ",")) != NULL) {
++ int token;
++ substring_t args[MAX_OPT_ARGS];
++
++ if (!*p)
++ continue;
++
++ token = match_token(p, ovl_tokens, args);
++ switch (token) {
++ case Opt_upperdir:
++ kfree(config->upperdir);
++ config->upperdir = match_strdup(&args[0]);
++ if (!config->upperdir)
++ return -ENOMEM;
++ break;
++
++ case Opt_lowerdir:
++ kfree(config->lowerdir);
++ config->lowerdir = match_strdup(&args[0]);
++ if (!config->lowerdir)
++ return -ENOMEM;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++static int ovl_fill_super(struct super_block *sb, void *data, int silent)
++{
++ struct path lowerpath;
++ struct path upperpath;
++ struct inode *root_inode;
++ struct dentry *root_dentry;
++ struct ovl_entry *oe;
++ struct ovl_fs *ufs;
++ int err;
++
++ err = -ENOMEM;
++ ufs = kmalloc(sizeof(struct ovl_fs), GFP_KERNEL);
++ if (!ufs)
++ goto out;
++
++ err = ovl_parse_opt((char *) data, &ufs->config);
++ if (err)
++ goto out_free_ufs;
++
++ err = -EINVAL;
++ if (!ufs->config.upperdir || !ufs->config.lowerdir) {
++ printk(KERN_ERR "overlayfs: missing upperdir or lowerdir\n");
++ goto out_free_config;
++ }
++
++ oe = ovl_alloc_entry();
++ if (oe == NULL)
++ goto out_free_config;
++
++ root_inode = ovl_new_inode(sb, S_IFDIR, oe);
++ if (!root_inode)
++ goto out_free_oe;
++
++ err = kern_path(ufs->config.upperdir, LOOKUP_FOLLOW, &upperpath);
++ if (err)
++ goto out_put_root;
++
++ err = kern_path(ufs->config.lowerdir, LOOKUP_FOLLOW, &lowerpath);
++ if (err)
++ goto out_put_upperpath;
++
++ err = -ENOTDIR;
++ if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) ||
++ !S_ISDIR(lowerpath.dentry->d_inode->i_mode))
++ goto out_put_lowerpath;
++
++ sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth,
++ lowerpath.mnt->mnt_sb->s_stack_depth) + 1;
++
++ err = -EINVAL;
++ if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
++ printk(KERN_ERR "overlayfs: maximum fs stacking depth exceeded\n");
++ goto out_put_lowerpath;
++ }
++
++
++ ufs->upper_mnt = clone_private_mount(&upperpath);
++ err = PTR_ERR(ufs->upper_mnt);
++ if (IS_ERR(ufs->upper_mnt)) {
++ printk(KERN_ERR "overlayfs: failed to clone upperpath\n");
++ goto out_put_lowerpath;
++ }
++
++ ufs->lower_mnt = clone_private_mount(&lowerpath);
++ err = PTR_ERR(ufs->lower_mnt);
++ if (IS_ERR(ufs->lower_mnt)) {
++ printk(KERN_ERR "overlayfs: failed to clone lowerpath\n");
++ goto out_put_upper_mnt;
++ }
++
++ /*
++ * Make lower_mnt R/O. That way fchmod/fchown on lower file
++ * will fail instead of modifying lower fs.
++ */
++ ufs->lower_mnt->mnt_flags |= MNT_READONLY;
++
++ /* If the upper fs is r/o, we mark overlayfs r/o too */
++ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)
++ sb->s_flags |= MS_RDONLY;
++
++ if (!(sb->s_flags & MS_RDONLY)) {
++ err = mnt_want_write(ufs->upper_mnt);
++ if (err)
++ goto out_put_lower_mnt;
++ }
++
++ err = -ENOMEM;
++ root_dentry = d_alloc_root(root_inode);
++ if (!root_dentry)
++ goto out_drop_write;
++
++ mntput(upperpath.mnt);
++ mntput(lowerpath.mnt);
++
++ oe->__upperdentry = dget(upperpath.dentry);
++ oe->lowerdentry = lowerpath.dentry;
++
++ root_dentry->d_fsdata = oe;
++ root_dentry->d_op = &ovl_dentry_operations;
++
++ sb->s_op = &ovl_super_operations;
++ sb->s_root = root_dentry;
++ sb->s_fs_info = ufs;
++
++ return 0;
++
++out_drop_write:
++ if (!(sb->s_flags & MS_RDONLY))
++ mnt_drop_write(ufs->upper_mnt);
++out_put_lower_mnt:
++ mntput(ufs->lower_mnt);
++out_put_upper_mnt:
++ mntput(ufs->upper_mnt);
++out_put_lowerpath:
++ path_put(&lowerpath);
++out_put_upperpath:
++ path_put(&upperpath);
++out_put_root:
++ iput(root_inode);
++out_free_oe:
++ kfree(oe);
++out_free_config:
++ kfree(ufs->config.lowerdir);
++ kfree(ufs->config.upperdir);
++out_free_ufs:
++ kfree(ufs);
++out:
++ return err;
++}
++
++static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags,
++ const char *dev_name, void *raw_data)
++{
++ return mount_nodev(fs_type, flags, raw_data, ovl_fill_super);
++}
++
++static struct file_system_type ovl_fs_type = {
++ .owner = THIS_MODULE,
++ .name = "overlayfs",
++ .mount = ovl_mount,
++ .kill_sb = kill_anon_super,
++};
++
++static int __init ovl_init(void)
++{
++ return register_filesystem(&ovl_fs_type);
++}
++
++static void __exit ovl_exit(void)
++{
++ unregister_filesystem(&ovl_fs_type);
++}
++
++module_init(ovl_init);
++module_exit(ovl_exit);
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1299,6 +1299,7 @@ long do_splice_direct(struct file *in, l
+
+ return ret;
+ }
++EXPORT_SYMBOL(do_splice_direct);
+
+ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
+ struct pipe_inode_info *opipe,
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -484,6 +484,12 @@ struct iattr {
+ */
+ #include <linux/quota.h>
+
++/*
++ * Maximum number of layers of fs stack. Needs to be limited to
++ * prevent kernel stack overflow
++ */
++#define FILESYSTEM_MAX_STACK_DEPTH 2
++
+ /**
+ * enum positive_aop_returns - aop return codes with specific semantics
+ *
+@@ -1496,6 +1502,11 @@ struct super_block {
+
+ /* Being remounted read-only */
+ int s_readonly_remount;
++
++ /*
++ * Indicates how deep in a filesystem stack this SB is
++ */
++ int s_stack_depth;
+ };
+
+ /* superblock cache pruning functions */
+@@ -1653,6 +1664,8 @@ struct inode_operations {
+ void (*truncate_range)(struct inode *, loff_t, loff_t);
+ int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
+ u64 len);
++ struct file *(*open) (struct dentry *, struct file *,
++ const struct cred *);
+ } ____cacheline_aligned;
+
+ struct seq_file;
+@@ -2023,6 +2036,7 @@ extern long do_sys_open(int dfd, const c
+ extern struct file *filp_open(const char *, int, umode_t);
+ extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+ const char *, int);
++extern struct file *vfs_open(struct path *, struct file *, const struct cred *);
+ extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
+ const struct cred *);
+ extern int filp_close(struct file *, fl_owner_t id);
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -66,6 +66,9 @@ extern void mnt_pin(struct vfsmount *mnt
+ extern void mnt_unpin(struct vfsmount *mnt);
+ extern int __mnt_is_readonly(struct vfsmount *mnt);
+
++struct path;
++extern struct vfsmount *clone_private_mount(struct path *path);
++
+ struct file_system_type;
+ extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
+ int flags, const char *name,
diff --git a/target/linux/generic/patches-3.3/102-ehci_hcd_ignore_oc.patch b/target/linux/generic/patches-3.3/102-ehci_hcd_ignore_oc.patch
new file mode 100644
index 0000000..400d2ed
--- /dev/null
+++ b/target/linux/generic/patches-3.3/102-ehci_hcd_ignore_oc.patch
@@ -0,0 +1,41 @@
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -795,7 +795,7 @@ static int ehci_run (struct usb_hcd *hcd
+ "USB %x.%x started, EHCI %x.%02x%s\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
+ temp >> 8, temp & 0xff,
+- ignore_oc ? ", overcurrent ignored" : "");
++ (ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : "");
+
+ ehci_writel(ehci, INTR_MASK,
+ &ehci->regs->intr_enable); /* Turn On Interrupts */
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -578,7 +578,7 @@ ehci_hub_status_data (struct usb_hcd *hc
+ * always set, seem to clear PORT_OCC and PORT_CSC when writing to
+ * PORT_POWER; that's surprising, but maybe within-spec.
+ */
+- if (!ignore_oc)
++ if (!ignore_oc && !ehci->ignore_oc)
+ mask = PORT_CSC | PORT_PEC | PORT_OCC;
+ else
+ mask = PORT_CSC | PORT_PEC;
+@@ -803,7 +803,7 @@ static int ehci_hub_control (
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+
+- if ((temp & PORT_OCC) && !ignore_oc){
++ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)){
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+ /*
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -147,6 +147,7 @@ struct ehci_hcd { /* one per controlle
+ unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
+ unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
+ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
++ unsigned ignore_oc:1;
+
+ /* required for usb32 quirk */
+ #define OHCI_CTRL_HCFS (3 << 6)
diff --git a/target/linux/generic/patches-3.3/110-fix_mtd_include.patch b/target/linux/generic/patches-3.3/110-fix_mtd_include.patch
new file mode 100644
index 0000000..c63dbc0
--- /dev/null
+++ b/target/linux/generic/patches-3.3/110-fix_mtd_include.patch
@@ -0,0 +1,10 @@
+--- a/include/linux/mtd/physmap.h
++++ b/include/linux/mtd/physmap.h
+@@ -17,6 +17,7 @@
+
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
++#include <linux/platform_device.h>
+
+ struct map_info;
+ struct platform_device;
diff --git a/target/linux/generic/patches-3.3/200-fix_localversion.patch b/target/linux/generic/patches-3.3/200-fix_localversion.patch
new file mode 100644
index 0000000..0d1bae8
--- /dev/null
+++ b/target/linux/generic/patches-3.3/200-fix_localversion.patch
@@ -0,0 +1,11 @@
+--- a/scripts/setlocalversion
++++ b/scripts/setlocalversion
+@@ -168,7 +168,7 @@ else
+ # annotated or signed tagged state (as git describe only
+ # looks at signed or annotated tags - git tag -a/-s) and
+ # LOCALVERSION= is not specified
+- if test "${LOCALVERSION+set}" != "set"; then
++ if test "${CONFIG_LOCALVERSION+set}" != "set"; then
+ scm=$(scm_version --short)
+ res="$res${scm:++}"
+ fi
diff --git a/target/linux/generic/patches-3.3/201-extra_optimization.patch b/target/linux/generic/patches-3.3/201-extra_optimization.patch
new file mode 100644
index 0000000..6fe8b09
--- /dev/null
+++ b/target/linux/generic/patches-3.3/201-extra_optimization.patch
@@ -0,0 +1,24 @@
+--- a/Makefile
++++ b/Makefile
+@@ -559,9 +559,9 @@ endif # $(dot-config)
+ all: vmlinux
+
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+-KBUILD_CFLAGS += -Os
++KBUILD_CFLAGS += -Os -fno-caller-saves
+ else
+-KBUILD_CFLAGS += -O2
++KBUILD_CFLAGS += -O2 -fno-reorder-blocks -fno-tree-ch -fno-caller-saves
+ endif
+
+ include $(srctree)/arch/$(SRCARCH)/Makefile
+@@ -620,6 +620,9 @@ endif
+ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
+ CHECKFLAGS += $(NOSTDINC_FLAGS)
+
++# improve gcc optimization
++CFLAGS += $(call cc-option,-funit-at-a-time,)
++
+ # warn about C99 declaration after statement
+ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
+
diff --git a/target/linux/generic/patches-3.3/210-darwin_scripts_include.patch b/target/linux/generic/patches-3.3/210-darwin_scripts_include.patch
new file mode 100644
index 0000000..72d344e
--- /dev/null
+++ b/target/linux/generic/patches-3.3/210-darwin_scripts_include.patch
@@ -0,0 +1,78 @@
+--- a/scripts/kallsyms.c
++++ b/scripts/kallsyms.c
+@@ -22,6 +22,35 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <ctype.h>
++#ifdef __APPLE__
++/* Darwin has no memmem implementation; this one is taken from the uClibc-0.9.28 source */
++void *memmem (const void *haystack, size_t haystack_len,
++ const void *needle, size_t needle_len)
++{
++ const char *begin;
++ const char *const last_possible
++ = (const char *) haystack + haystack_len - needle_len;
++
++ if (needle_len == 0)
++ /* The first occurrence of the empty string is deemed to occur at
++ the beginning of the string. */
++ return (void *) haystack;
++
++ /* Sanity check, otherwise the loop might search through the whole
++ memory. */
++ if (__builtin_expect (haystack_len < needle_len, 0))
++ return NULL;
++
++ for (begin = (const char *) haystack; begin <= last_possible; ++begin)
++ if (begin[0] == ((const char *) needle)[0] &&
++ !memcmp ((const void *) &begin[1],
++ (const void *) ((const char *) needle + 1),
++ needle_len - 1))
++ return (void *) begin;
++
++ return NULL;
++}
++#endif
+
+ #ifndef ARRAY_SIZE
+ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
+--- a/scripts/kconfig/Makefile
++++ b/scripts/kconfig/Makefile
+@@ -123,6 +123,9 @@ check-lxdialog := $(srctree)/$(src)/lxd
+ # we really need to do so. (Do not call gcc as part of make mrproper)
+ HOST_EXTRACFLAGS += $(shell $(CONFIG_SHELL) $(check-lxdialog) -ccflags) \
+ -DLOCALE
++ifeq ($(shell uname -s),Darwin)
++HOST_LOADLIBES += -lncurses
++endif
+
+ # ===========================================================================
+ # Shared Makefile for the various kconfig executables:
+--- a/scripts/mod/mk_elfconfig.c
++++ b/scripts/mod/mk_elfconfig.c
+@@ -1,7 +1,11 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
++#ifndef __APPLE__
+ #include <elf.h>
++#else
++#include "../../../../../tools/sstrip/include/elf.h"
++#endif
+
+ int
+ main(int argc, char **argv)
+--- a/scripts/mod/modpost.h
++++ b/scripts/mod/modpost.h
+@@ -7,7 +7,11 @@
+ #include <sys/mman.h>
+ #include <fcntl.h>
+ #include <unistd.h>
++#if !(defined(__APPLE__) || defined(__CYGWIN__))
+ #include <elf.h>
++#else
++#include "../../../../../tools/sstrip/include/elf.h"
++#endif
+
+ #include "elfconfig.h"
+
diff --git a/target/linux/generic/patches-3.3/211-stddef_include.patch b/target/linux/generic/patches-3.3/211-stddef_include.patch
new file mode 100644
index 0000000..7fe248d
--- /dev/null
+++ b/target/linux/generic/patches-3.3/211-stddef_include.patch
@@ -0,0 +1,17 @@
+--- a/include/linux/stddef.h
++++ b/include/linux/stddef.h
+@@ -16,6 +16,7 @@ enum {
+ false = 0,
+ true = 1
+ };
++#endif /* __KERNEL__ */
+
+ #undef offsetof
+ #ifdef __compiler_offsetof
+@@ -23,6 +24,5 @@ enum {
+ #else
+ #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+ #endif
+-#endif /* __KERNEL__ */
+
+ #endif
diff --git a/target/linux/generic/patches-3.3/220-module_exports.patch b/target/linux/generic/patches-3.3/220-module_exports.patch
new file mode 100644
index 0000000..be6b6ff
--- /dev/null
+++ b/target/linux/generic/patches-3.3/220-module_exports.patch
@@ -0,0 +1,89 @@
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -52,6 +52,27 @@
+ #define LOAD_OFFSET 0
+ #endif
+
++#ifndef SYMTAB_KEEP_STR
++#define SYMTAB_KEEP_STR *(__ksymtab_strings+*)
++#define SYMTAB_DISCARD_STR
++#else
++#define SYMTAB_DISCARD_STR *(__ksymtab_strings+*)
++#endif
++
++#ifndef SYMTAB_KEEP
++#define SYMTAB_KEEP *(SORT(___ksymtab+*))
++#define SYMTAB_DISCARD
++#else
++#define SYMTAB_DISCARD *(SORT(___ksymtab+*))
++#endif
++
++#ifndef SYMTAB_KEEP_GPL
++#define SYMTAB_KEEP_GPL *(SORT(___ksymtab_gpl+*))
++#define SYMTAB_DISCARD_GPL
++#else
++#define SYMTAB_DISCARD_GPL *(SORT(___ksymtab_gpl+*))
++#endif
++
+ #ifndef SYMBOL_PREFIX
+ #define VMLINUX_SYMBOL(sym) sym
+ #else
+@@ -275,14 +296,14 @@
+ /* Kernel symbol table: Normal symbols */ \
+ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab) = .; \
+- *(SORT(___ksymtab+*)) \
++ SYMTAB_KEEP \
+ VMLINUX_SYMBOL(__stop___ksymtab) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
+- *(SORT(___ksymtab_gpl+*)) \
++ SYMTAB_KEEP_GPL \
+ VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
+ } \
+ \
+@@ -344,7 +365,7 @@
+ \
+ /* Kernel symbol table: strings */ \
+ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
+- *(__ksymtab_strings) \
++ SYMTAB_KEEP_STR \
+ } \
+ \
+ /* __*init sections */ \
+@@ -676,6 +697,9 @@
+ EXIT_TEXT \
+ EXIT_DATA \
+ EXIT_CALL \
++ SYMTAB_DISCARD \
++ SYMTAB_DISCARD_GPL \
++ SYMTAB_DISCARD_STR \
+ *(.discard) \
+ *(.discard.*) \
+ }
+--- a/include/linux/export.h
++++ b/include/linux/export.h
+@@ -45,12 +45,19 @@ extern struct module __this_module;
+ #define __CRC_SYMBOL(sym, sec)
+ #endif
+
++#ifdef MODULE
++#define __EXPORT_SUFFIX(sym)
++#else
++#define __EXPORT_SUFFIX(sym) "+" #sym
++#endif
++
+ /* For every exported symbol, place a struct in the __ksymtab section */
+ #define __EXPORT_SYMBOL(sym, sec) \
+ extern typeof(sym) sym; \
+ __CRC_SYMBOL(sym, sec) \
+ static const char __kstrtab_##sym[] \
+- __attribute__((section("__ksymtab_strings"), aligned(1))) \
++ __attribute__((section("__ksymtab_strings" \
++ __EXPORT_SUFFIX(sym)), aligned(1))) \
+ = MODULE_SYMBOL_PREFIX #sym; \
+ static const struct kernel_symbol __ksymtab_##sym \
+ __used \
diff --git a/target/linux/generic/patches-3.3/230-openwrt_lzma_options.patch b/target/linux/generic/patches-3.3/230-openwrt_lzma_options.patch
new file mode 100644
index 0000000..f17f40a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/230-openwrt_lzma_options.patch
@@ -0,0 +1,54 @@
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -296,7 +296,7 @@ cmd_bzip2 = (cat $(filter-out FORCE,$^)
+
+ quiet_cmd_lzma = LZMA $@
+ cmd_lzma = (cat $(filter-out FORCE,$^) | \
+- lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
++ lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
+ (rm -f $@ ; false)
+
+ quiet_cmd_lzo = LZO $@
+--- a/scripts/gen_initramfs_list.sh
++++ b/scripts/gen_initramfs_list.sh
+@@ -226,7 +226,7 @@ cpio_list=
+ output="/dev/stdout"
+ output_file=""
+ is_cpio_compressed=
+-compr="gzip -n -9 -f"
++compr="gzip -n -9 -f -"
+
+ arg="$1"
+ case "$arg" in
+@@ -240,9 +240,9 @@ case "$arg" in
+ output_file="$1"
+ cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)"
+ output=${cpio_list}
+- echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f"
+- echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
+- echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
++ echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f -"
++ echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f -"
++ echo "$output_file" | grep -q "\.lzma$" && compr="lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so"
+ echo "$output_file" | grep -q "\.xz$" && \
+ compr="xz --check=crc32 --lzma2=dict=1MiB"
+ echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
+@@ -303,7 +303,7 @@ if [ ! -z ${output_file} ]; then
+ if [ "${is_cpio_compressed}" = "compressed" ]; then
+ cat ${cpio_tfile} > ${output_file}
+ else
+- (cat ${cpio_tfile} | ${compr} - > ${output_file}) \
++ (cat ${cpio_tfile} | ${compr} > ${output_file}) \
+ || (rm -f ${output_file} ; false)
+ fi
+ [ -z ${cpio_file} ] && rm ${cpio_tfile}
+--- a/lib/decompress.c
++++ b/lib/decompress.c
+@@ -40,6 +40,7 @@ static const struct compress_format {
+ { {037, 0236}, "gzip", gunzip },
+ { {0x42, 0x5a}, "bzip2", bunzip2 },
+ { {0x5d, 0x00}, "lzma", unlzma },
++ { {0x6d, 0x00}, "lzma-openwrt", unlzma },
+ { {0xfd, 0x37}, "xz", unxz },
+ { {0x89, 0x4c}, "lzo", unlzo },
+ { {0, 0}, NULL, NULL }
diff --git a/target/linux/generic/patches-3.3/250-netfilter_depends.patch b/target/linux/generic/patches-3.3/250-netfilter_depends.patch
new file mode 100644
index 0000000..cbe3892
--- /dev/null
+++ b/target/linux/generic/patches-3.3/250-netfilter_depends.patch
@@ -0,0 +1,18 @@
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -181,7 +181,6 @@ config NF_CONNTRACK_FTP
+
+ config NF_CONNTRACK_H323
+ tristate "H.323 protocol support"
+- depends on (IPV6 || IPV6=n)
+ depends on NETFILTER_ADVANCED
+ help
+ H.323 is a VoIP signalling protocol from ITU-T. As one of the most
+@@ -627,7 +626,6 @@ config NETFILTER_XT_TARGET_SECMARK
+
+ config NETFILTER_XT_TARGET_TCPMSS
+ tristate '"TCPMSS" target support'
+- depends on (IPV6 || IPV6=n)
+ default m if NETFILTER_ADVANCED=n
+ ---help---
+ This option adds a `TCPMSS' target, which allows you to alter the
diff --git a/target/linux/generic/patches-3.3/251-sound_kconfig.patch b/target/linux/generic/patches-3.3/251-sound_kconfig.patch
new file mode 100644
index 0000000..f374009
--- /dev/null
+++ b/target/linux/generic/patches-3.3/251-sound_kconfig.patch
@@ -0,0 +1,11 @@
+--- a/sound/core/Kconfig
++++ b/sound/core/Kconfig
+@@ -7,7 +7,7 @@ config SND_PCM
+ select SND_TIMER
+
+ config SND_HWDEP
+- tristate
++ tristate "Sound hardware support"
+
+ config SND_RAWMIDI
+ tristate
diff --git a/target/linux/generic/patches-3.3/252-mv_cesa_depends.patch b/target/linux/generic/patches-3.3/252-mv_cesa_depends.patch
new file mode 100644
index 0000000..b43d29c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/252-mv_cesa_depends.patch
@@ -0,0 +1,10 @@
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -172,6 +172,7 @@ config CRYPTO_DEV_MV_CESA
+ depends on PLAT_ORION
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
++ select CRYPTO_HASH2
+ select CRYPTO_BLKCIPHER2
+ help
+ This driver allows you to utilize the Cryptographic Engines and
diff --git a/target/linux/generic/patches-3.3/253-ssb_b43_default_on.patch b/target/linux/generic/patches-3.3/253-ssb_b43_default_on.patch
new file mode 100644
index 0000000..29d2a41
--- /dev/null
+++ b/target/linux/generic/patches-3.3/253-ssb_b43_default_on.patch
@@ -0,0 +1,29 @@
+--- a/drivers/ssb/Kconfig
++++ b/drivers/ssb/Kconfig
+@@ -29,6 +29,7 @@ config SSB_SPROM
+ config SSB_BLOCKIO
+ bool
+ depends on SSB
++ default y
+
+ config SSB_PCIHOST_POSSIBLE
+ bool
+@@ -49,7 +50,7 @@ config SSB_PCIHOST
+ config SSB_B43_PCI_BRIDGE
+ bool
+ depends on SSB_PCIHOST
+- default n
++ default y
+
+ config SSB_PCMCIAHOST_POSSIBLE
+ bool
+--- a/drivers/bcma/Kconfig
++++ b/drivers/bcma/Kconfig
+@@ -17,6 +17,7 @@ config BCMA
+ config BCMA_BLOCKIO
+ bool
+ depends on BCMA
++ default y
+
+ config BCMA_HOST_PCI_POSSIBLE
+ bool
diff --git a/target/linux/generic/patches-3.3/254-textsearch_kconfig_hacks.patch b/target/linux/generic/patches-3.3/254-textsearch_kconfig_hacks.patch
new file mode 100644
index 0000000..07b5f76
--- /dev/null
+++ b/target/linux/generic/patches-3.3/254-textsearch_kconfig_hacks.patch
@@ -0,0 +1,23 @@
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -204,16 +204,16 @@ config BCH_CONST_T
+ # Textsearch support is select'ed if needed
+ #
+ config TEXTSEARCH
+- boolean
++ boolean "Textsearch support"
+
+ config TEXTSEARCH_KMP
+- tristate
++ tristate "Textsearch KMP"
+
+ config TEXTSEARCH_BM
+- tristate
++ tristate "Textsearch BM"
+
+ config TEXTSEARCH_FSM
+- tristate
++ tristate "Textsearch FSM"
+
+ config BTREE
+ boolean
diff --git a/target/linux/generic/patches-3.3/255-lib80211_kconfig_hacks.patch b/target/linux/generic/patches-3.3/255-lib80211_kconfig_hacks.patch
new file mode 100644
index 0000000..8dde331
--- /dev/null
+++ b/target/linux/generic/patches-3.3/255-lib80211_kconfig_hacks.patch
@@ -0,0 +1,19 @@
+--- a/net/wireless/Kconfig
++++ b/net/wireless/Kconfig
+@@ -143,13 +143,13 @@ config LIB80211
+ you want this built into your kernel.
+
+ config LIB80211_CRYPT_WEP
+- tristate
++ tristate "LIB80211_CRYPT_WEP"
+
+ config LIB80211_CRYPT_CCMP
+- tristate
++ tristate "LIB80211_CRYPT_CCMP"
+
+ config LIB80211_CRYPT_TKIP
+- tristate
++ tristate "LIB80211_CRYPT_TKIP"
+
+ config LIB80211_DEBUG
+ bool "lib80211 debugging messages"
diff --git a/target/linux/generic/patches-3.3/256-crypto_add_kconfig_prompts.patch b/target/linux/generic/patches-3.3/256-crypto_add_kconfig_prompts.patch
new file mode 100644
index 0000000..8462c71
--- /dev/null
+++ b/target/linux/generic/patches-3.3/256-crypto_add_kconfig_prompts.patch
@@ -0,0 +1,47 @@
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -31,7 +31,7 @@ config CRYPTO_FIPS
+ this is.
+
+ config CRYPTO_ALGAPI
+- tristate
++ tristate "ALGAPI"
+ select CRYPTO_ALGAPI2
+ help
+ This option provides the API for cryptographic algorithms.
+@@ -40,7 +40,7 @@ config CRYPTO_ALGAPI2
+ tristate
+
+ config CRYPTO_AEAD
+- tristate
++ tristate "AEAD"
+ select CRYPTO_AEAD2
+ select CRYPTO_ALGAPI
+
+@@ -49,7 +49,7 @@ config CRYPTO_AEAD2
+ select CRYPTO_ALGAPI2
+
+ config CRYPTO_BLKCIPHER
+- tristate
++ tristate "BLKCIPHER"
+ select CRYPTO_BLKCIPHER2
+ select CRYPTO_ALGAPI
+
+@@ -60,7 +60,7 @@ config CRYPTO_BLKCIPHER2
+ select CRYPTO_WORKQUEUE
+
+ config CRYPTO_HASH
+- tristate
++ tristate "HASH"
+ select CRYPTO_HASH2
+ select CRYPTO_ALGAPI
+
+@@ -69,7 +69,7 @@ config CRYPTO_HASH2
+ select CRYPTO_ALGAPI2
+
+ config CRYPTO_RNG
+- tristate
++ tristate "RNG"
+ select CRYPTO_RNG2
+ select CRYPTO_ALGAPI
+
diff --git a/target/linux/generic/patches-3.3/257-wireless_ext_kconfig_hack.patch b/target/linux/generic/patches-3.3/257-wireless_ext_kconfig_hack.patch
new file mode 100644
index 0000000..daac589
--- /dev/null
+++ b/target/linux/generic/patches-3.3/257-wireless_ext_kconfig_hack.patch
@@ -0,0 +1,22 @@
+--- a/net/wireless/Kconfig
++++ b/net/wireless/Kconfig
+@@ -1,5 +1,5 @@
+ config WIRELESS_EXT
+- bool
++ bool "Wireless extensions"
+
+ config WEXT_CORE
+ def_bool y
+@@ -11,10 +11,10 @@ config WEXT_PROC
+ depends on WEXT_CORE
+
+ config WEXT_SPY
+- bool
++ bool "WEXT_SPY"
+
+ config WEXT_PRIV
+- bool
++ bool "WEXT_PRIV"
+
+ config CFG80211
+ tristate "cfg80211 - wireless configuration API"
diff --git a/target/linux/generic/patches-3.3/300-mips_expose_boot_raw.patch b/target/linux/generic/patches-3.3/300-mips_expose_boot_raw.patch
new file mode 100644
index 0000000..b114ef4
--- /dev/null
+++ b/target/linux/generic/patches-3.3/300-mips_expose_boot_raw.patch
@@ -0,0 +1,39 @@
+From: Mark Miller <mark@mirell.org>
+
+This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on
+certain Broadcom chipsets running CFE in order to load the kernel.
+
+Signed-off-by: Mark Miller <mark@mirell.org>
+Acked-by: Rob Landley <rob@landley.net>
+---
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -877,9 +877,6 @@ config ARC
+ config ARCH_MAY_HAVE_PC_FDC
+ bool
+
+-config BOOT_RAW
+- bool
+-
+ config CEVT_BCM1480
+ bool
+
+@@ -2330,6 +2327,18 @@ config USE_OF
+ help
+ Include support for flattened device tree machine descriptions.
+
++config BOOT_RAW
++ bool "Enable the kernel to be executed from the load address"
++ default n
++ help
++ Allow the kernel to be executed from the load address for
++ bootloaders which cannot read the ELF format. This places
++ a jump to start_kernel at the load address.
++
++ If unsure, say N.
++
++
++
+ endmenu
+
+ config LOCKDEP_SUPPORT
diff --git a/target/linux/generic/patches-3.3/301-mips_image_cmdline_hack.patch b/target/linux/generic/patches-3.3/301-mips_image_cmdline_hack.patch
new file mode 100644
index 0000000..9193c68
--- /dev/null
+++ b/target/linux/generic/patches-3.3/301-mips_image_cmdline_hack.patch
@@ -0,0 +1,28 @@
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -976,6 +976,10 @@ config SYNC_R4K
+ config MIPS_MACHINE
+ def_bool n
+
++config IMAGE_CMDLINE_HACK
++ bool "OpenWrt specific image command line hack"
++ default n
++
+ config NO_IOPORT
+ def_bool n
+
+--- a/arch/mips/kernel/head.S
++++ b/arch/mips/kernel/head.S
+@@ -141,6 +141,12 @@ FEXPORT(__kernel_entry)
+ j kernel_entry
+ #endif
+
++#ifdef CONFIG_IMAGE_CMDLINE_HACK
++ .ascii "CMDLINE:"
++EXPORT(__image_cmdline)
++ .fill 0x400
++#endif /* CONFIG_IMAGE_CMDLINE_HACK */
++
+ __REF
+
+ NESTED(kernel_entry, 16, sp) # kernel entry point
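The hunk above reserves a 0x400-byte buffer in head.S, preceded by the literal "CMDLINE:" marker, so that image-generation tools can patch a command line straight into the kernel binary without help from the bootloader. Consuming that buffer is left to platform code; the sketch below only illustrates the usual pattern, and the helper name is invented rather than added by this patch.

    #include <linux/init.h>
    #include <linux/string.h>
    #include <asm/bootinfo.h>               /* arcs_cmdline[], COMMAND_LINE_SIZE */

    #ifdef CONFIG_IMAGE_CMDLINE_HACK
    extern char __image_cmdline[];          /* the buffer defined in head.S above */

    /* Hypothetical board hook: append whatever was patched into the image
     * to the command line handed over by the bootloader. */
    static void __init append_image_cmdline(void)
    {
            if (__image_cmdline[0]) {
                    strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
                    strlcat(arcs_cmdline, __image_cmdline, COMMAND_LINE_SIZE);
            }
    }
    #endif /* CONFIG_IMAGE_CMDLINE_HACK */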
diff --git a/target/linux/generic/patches-3.3/302-mips_use_generic_thread_info_allocator.patch b/target/linux/generic/patches-3.3/302-mips_use_generic_thread_info_allocator.patch
new file mode 100644
index 0000000..1bcc74b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/302-mips_use_generic_thread_info_allocator.patch
@@ -0,0 +1,18 @@
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -85,6 +85,7 @@ register struct thread_info *__current_t
+
+ #define STACK_WARN (THREAD_SIZE / 8)
+
++#if 0
+ #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+
+ #ifdef CONFIG_DEBUG_STACK_USAGE
+@@ -96,6 +97,7 @@ register struct thread_info *__current_t
+ #endif
+
+ #define free_thread_info(info) kfree(info)
++#endif
+
+ #endif /* !__ASSEMBLY__ */
+
diff --git a/target/linux/generic/patches-3.3/303-mips_fix_kexec.patch b/target/linux/generic/patches-3.3/303-mips_fix_kexec.patch
new file mode 100644
index 0000000..e6928dd
--- /dev/null
+++ b/target/linux/generic/patches-3.3/303-mips_fix_kexec.patch
@@ -0,0 +1,11 @@
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -52,7 +52,7 @@ machine_kexec(struct kimage *image)
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
+
+- kexec_start_address = image->start;
++ kexec_start_address = (unsigned long) phys_to_virt(image->start);
+ kexec_indirection_page =
+ (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+
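For context on the one-line fix above: kexec-tools passes image->start as a physical load address, while the relocation stub jumps through a virtual pointer, and on MIPS the two differ by the fixed KSEG0 offset. The user-space snippet below only illustrates that address arithmetic; the constants model a 32-bit KSEG0 layout and the addresses are hypothetical.

    #include <stdio.h>

    #define KSEG0_BASE 0x80000000UL   /* unmapped, cached window over low RAM on MIPS32 */

    static unsigned long sketch_phys_to_virt(unsigned long pa)
    {
            return pa + KSEG0_BASE;
    }

    int main(void)
    {
            unsigned long image_start = 0x00100000UL;   /* hypothetical physical entry point */

            printf("before the fix the stub jumps to 0x%08lx (physical)\n", image_start);
            printf("after the fix it jumps to 0x%08lx (KSEG0 alias of the same RAM)\n",
                   sketch_phys_to_virt(image_start));
            return 0;
    }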
diff --git a/target/linux/generic/patches-3.3/304-mips_disable_fpu.patch b/target/linux/generic/patches-3.3/304-mips_disable_fpu.patch
new file mode 100644
index 0000000..e747fea
--- /dev/null
+++ b/target/linux/generic/patches-3.3/304-mips_disable_fpu.patch
@@ -0,0 +1,160 @@
+MIPS: allow disabling the kernel FPU emulator
+
+This patch allows turning off the in-kernel Algorithmics
+FPU emulator support, so one can save a couple of
+precious blocks on an embedded system.
+
+Signed-off-by: Florian Fainelli <florian@openwrt.org>
+--
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -961,6 +961,17 @@ config I8259
+ config MIPS_BONITO64
+ bool
+
++config MIPS_FPU_EMU
++ bool "Enable FPU emulation"
++ default y
++ help
++ This option allows building a kernel with or without the Algorithmics
++ FPU emulator enabled. Turning off this option results in a kernel which
++	  does not catch floating point exceptions. Make sure that your toolchain
++ is configured to enable software floating point emulation in that case.
++
++ If unsure say Y here.
++
+ config MIPS_MSC
+ bool
+
+--- a/arch/mips/math-emu/Makefile
++++ b/arch/mips/math-emu/Makefile
+@@ -2,11 +2,13 @@
+ # Makefile for the Linux/MIPS kernel FPU emulation.
+ #
+
+-obj-y := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
++obj-y := kernel_linkage.o dsemul.o cp1emu.o
++
++obj-$(CONFIG_MIPS_FPU_EMU) += ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
+ ieee754xcpt.o dp_frexp.o dp_modf.o dp_div.o dp_mul.o dp_sub.o \
+ dp_add.o dp_fsp.o dp_cmp.o dp_logb.o dp_scalb.o dp_simple.o \
+ dp_tint.o dp_fint.o dp_tlong.o dp_flong.o sp_frexp.o sp_modf.o \
+ sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \
+ sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \
+- dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o
++ dp_sqrt.o sp_sqrt.o
+
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -58,7 +58,11 @@
+ #define __mips 4
+
+ /* Function which emulates a floating point instruction. */
++#ifdef CONFIG_DEBUG_FS
++DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
++#endif
+
++#ifdef CONFIG_MIPS_FPU_EMU
+ static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
+ mips_instruction);
+
+@@ -69,10 +73,6 @@ static int fpux_emu(struct pt_regs *,
+
+ /* Further private data for which no space exists in mips_fpu_struct */
+
+-#ifdef CONFIG_DEBUG_FS
+-DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
+-#endif
+-
+ /* Control registers */
+
+ #define FPCREG_RID 0 /* $0 = revision id */
+@@ -1360,7 +1360,6 @@ int fpu_emulator_cop1Handler(struct pt_r
+
+ return sig;
+ }
+-
+ #ifdef CONFIG_DEBUG_FS
+
+ static int fpuemu_stat_get(void *data, u64 *val)
+@@ -1409,4 +1408,11 @@ static int __init debugfs_fpuemu(void)
+ return 0;
+ }
+ __initcall(debugfs_fpuemu);
+-#endif
++#endif /* CONFIG_DEBUG_FS */
++#else
++int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
++ int has_fpu)
++{
++ return 0;
++}
++#endif /* CONFIG_MIPS_FPU_EMU */
+--- a/arch/mips/math-emu/dsemul.c
++++ b/arch/mips/math-emu/dsemul.c
+@@ -109,6 +109,7 @@ int mips_dsemul(struct pt_regs *regs, mi
+ return SIGILL; /* force out of emulation loop */
+ }
+
++#ifdef CONFIG_MIPS_FPU_EMU
+ int do_dsemulret(struct pt_regs *xcp)
+ {
+ struct emuframe __user *fr;
+@@ -165,3 +166,9 @@ int do_dsemulret(struct pt_regs *xcp)
+
+ return 1;
+ }
++#else
++int do_dsemulret(struct pt_regs *xcp)
++{
++ return 0;
++}
++#endif /* CONFIG_MIPS_FPU_EMU */
+--- a/arch/mips/math-emu/kernel_linkage.c
++++ b/arch/mips/math-emu/kernel_linkage.c
+@@ -29,6 +29,7 @@
+
+ #define SIGNALLING_NAN 0x7ff800007ff80000LL
+
++#ifdef CONFIG_MIPS_FPU_EMU
+ void fpu_emulator_init_fpu(void)
+ {
+ static int first = 1;
+@@ -112,4 +113,36 @@ int fpu_emulator_restore_context32(struc
+
+ return err;
+ }
+-#endif
++#endif /* CONFIG_64BIT */
++#else
++
++void fpu_emulator_init_fpu(void)
++{
++	printk(KERN_INFO "FPU emulator disabled, make sure your toolchain "
++ "was compiled with software floating point support (soft-float)\n");
++ return;
++}
++
++int fpu_emulator_save_context(struct sigcontext __user *sc)
++{
++ return 0;
++}
++
++int fpu_emulator_restore_context(struct sigcontext __user *sc)
++{
++ return 0;
++}
++
++int fpu_emulator_save_context32(struct sigcontext32 __user *sc)
++{
++ return 0;
++}
++
++int fpu_emulator_restore_context32(struct sigcontext32 __user *sc)
++{
++ return 0;
++}
++
++#ifdef CONFIG_64BIT
++#endif /* CONFIG_64BIT */
++#endif /* CONFIG_MIPS_FPU_EMU */
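The help text added above asks users to pair MIPS_FPU_EMU=n with a soft-float toolchain. The reason: with -msoft-float the compiler never emits FPU instructions, so there is nothing for the now-missing emulator to handle; floating point is lowered to libgcc helpers instead. A minimal illustration follows; the cross-compiler name is only an example.

    /* Build with a soft-float toolchain, e.g.
     *     mips-openwrt-linux-gcc -msoft-float -o float_test float_test.c
     * The multiplication below becomes a call to __muldf3() instead of mul.d,
     * so no coprocessor-unusable exception ever needs kernel emulation. */
    #include <stdio.h>

    int main(void)
    {
            volatile double a = 1.5, b = 2.25;

            printf("%f\n", a * b);
            return 0;
    }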
diff --git a/target/linux/generic/patches-3.3/305-mips_module_reloc.patch b/target/linux/generic/patches-3.3/305-mips_module_reloc.patch
new file mode 100644
index 0000000..b4b142c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/305-mips_module_reloc.patch
@@ -0,0 +1,371 @@
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -90,8 +90,8 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
+ cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
+ cflags-y += -msoft-float
+ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+-KBUILD_AFLAGS_MODULE += -mlong-calls
+-KBUILD_CFLAGS_MODULE += -mlong-calls
++KBUILD_AFLAGS_MODULE += -mno-long-calls
++KBUILD_CFLAGS_MODULE += -mno-long-calls
+
+ cflags-y += -ffreestanding
+
+--- a/arch/mips/include/asm/module.h
++++ b/arch/mips/include/asm/module.h
+@@ -9,6 +9,11 @@ struct mod_arch_specific {
+ struct list_head dbe_list;
+ const struct exception_table_entry *dbe_start;
+ const struct exception_table_entry *dbe_end;
++
++ void *phys_plt_tbl;
++ void *virt_plt_tbl;
++ unsigned int phys_plt_offset;
++ unsigned int virt_plt_offset;
+ };
+
+ typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
+--- a/arch/mips/kernel/module.c
++++ b/arch/mips/kernel/module.c
+@@ -44,14 +44,219 @@ static struct mips_hi16 *mips_hi16_list;
+ static LIST_HEAD(dbe_list);
+ static DEFINE_SPINLOCK(dbe_lock);
+
+-#ifdef MODULE_START
++/*
++ * Get the potential max trampolines size required of the init and
++ * non-init sections. Only used if we cannot find enough contiguous
++ * physically mapped memory to put the module into.
++ */
++static unsigned int
++get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
++ const char *secstrings, unsigned int symindex, bool is_init)
++{
++ unsigned long ret = 0;
++ unsigned int i, j;
++ Elf_Sym *syms;
++
++ /* Everything marked ALLOC (this includes the exported symbols) */
++ for (i = 1; i < hdr->e_shnum; ++i) {
++ unsigned int info = sechdrs[i].sh_info;
++
++ if (sechdrs[i].sh_type != SHT_REL
++ && sechdrs[i].sh_type != SHT_RELA)
++ continue;
++
++ /* Not a valid relocation section? */
++ if (info >= hdr->e_shnum)
++ continue;
++
++ /* Don't bother with non-allocated sections */
++ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
++ continue;
++
++ /* If it's called *.init*, and we're not init, we're
++ not interested */
++ if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
++ != is_init)
++ continue;
++
++ syms = (Elf_Sym *) sechdrs[symindex].sh_addr;
++ if (sechdrs[i].sh_type == SHT_REL) {
++ Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr;
++ unsigned int size = sechdrs[i].sh_size / sizeof(*rel);
++
++ for (j = 0; j < size; ++j) {
++ Elf_Sym *sym;
++
++ if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26)
++ continue;
++
++ sym = syms + ELF_MIPS_R_SYM(rel[j]);
++ if (!is_init && sym->st_shndx != SHN_UNDEF)
++ continue;
++
++ ret += 4 * sizeof(int);
++ }
++ } else {
++ Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr;
++ unsigned int size = sechdrs[i].sh_size / sizeof(*rela);
++
++ for (j = 0; j < size; ++j) {
++ Elf_Sym *sym;
++
++ if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26)
++ continue;
++
++ sym = syms + ELF_MIPS_R_SYM(rela[j]);
++ if (!is_init && sym->st_shndx != SHN_UNDEF)
++ continue;
++
++ ret += 4 * sizeof(int);
++ }
++ }
++ }
++
++ return ret;
++}
++
++#ifndef MODULE_START
++static void *alloc_phys(unsigned long size)
++{
++ unsigned order;
++ struct page *page;
++ struct page *p;
++
++ size = PAGE_ALIGN(size);
++ order = get_order(size);
++
++ page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN |
++ __GFP_THISNODE, order);
++ if (!page)
++ return NULL;
++
++ split_page(page, order);
++
++ for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p)
++ __free_page(p);
++
++ return page_address(page);
++}
++#endif
++
++static void free_phys(void *ptr, unsigned long size)
++{
++ struct page *page;
++ struct page *end;
++
++ page = virt_to_page(ptr);
++ end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
++
++ for (; page < end; ++page)
++ __free_page(page);
++}
++
++
+ void *module_alloc(unsigned long size)
+ {
++#ifdef MODULE_START
+ return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+ GFP_KERNEL, PAGE_KERNEL, -1,
+ __builtin_return_address(0));
++#else
++ void *ptr;
++
++ if (size == 0)
++ return NULL;
++
++ ptr = alloc_phys(size);
++
++ /* If we failed to allocate physically contiguous memory,
++ * fall back to regular vmalloc. The module loader code will
++ * create jump tables to handle long jumps */
++ if (!ptr)
++ return vmalloc(size);
++
++ return ptr;
++#endif
+ }
++
++static inline bool is_phys_addr(void *ptr)
++{
++#ifdef CONFIG_64BIT
++ return (KSEGX((unsigned long)ptr) == CKSEG0);
++#else
++ return (KSEGX(ptr) == KSEG0);
+ #endif
++}
++
++/* Free memory returned from module_alloc */
++void module_free(struct module *mod, void *module_region)
++{
++ if (is_phys_addr(module_region)) {
++ if (mod->module_init == module_region)
++ free_phys(module_region, mod->init_size);
++ else if (mod->module_core == module_region)
++ free_phys(module_region, mod->core_size);
++ else
++ BUG();
++ } else {
++ vfree(module_region);
++ }
++}
++
++static void *__module_alloc(int size, bool phys)
++{
++ void *ptr;
++
++ if (phys)
++ ptr = kmalloc(size, GFP_KERNEL);
++ else
++ ptr = vmalloc(size);
++ return ptr;
++}
++
++static void __module_free(void *ptr)
++{
++ if (is_phys_addr(ptr))
++ kfree(ptr);
++ else
++ vfree(ptr);
++}
++
++int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
++ char *secstrings, struct module *mod)
++{
++ unsigned int symindex = 0;
++ unsigned int core_size, init_size;
++ int i;
++
++ for (i = 1; i < hdr->e_shnum; i++)
++ if (sechdrs[i].sh_type == SHT_SYMTAB)
++ symindex = i;
++
++ core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false);
++ init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true);
++
++ mod->arch.phys_plt_offset = 0;
++ mod->arch.virt_plt_offset = 0;
++ mod->arch.phys_plt_tbl = NULL;
++ mod->arch.virt_plt_tbl = NULL;
++
++ if ((core_size + init_size) == 0)
++ return 0;
++
++ mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1);
++ if (!mod->arch.phys_plt_tbl)
++ return -ENOMEM;
++
++ mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0);
++ if (!mod->arch.virt_plt_tbl) {
++ __module_free(mod->arch.phys_plt_tbl);
++ mod->arch.phys_plt_tbl = NULL;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
+
+ static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
+ {
+@@ -72,28 +277,36 @@ static int apply_r_mips_32_rela(struct m
+ return 0;
+ }
+
+-static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
++static Elf_Addr add_plt_entry_to(unsigned *plt_offset,
++ void *start, Elf_Addr v)
+ {
+- if (v % 4) {
+- pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
+- me->name);
+- return -ENOEXEC;
+- }
++ unsigned *tramp = start + *plt_offset;
++ *plt_offset += 4 * sizeof(int);
+
+- if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+- printk(KERN_ERR
+- "module %s: relocation overflow\n",
+- me->name);
+- return -ENOEXEC;
+- }
++ /* adjust carry for addiu */
++ if (v & 0x00008000)
++ v += 0x10000;
+
+- *location = (*location & ~0x03ffffff) |
+- ((*location + (v >> 2)) & 0x03ffffff);
++ tramp[0] = 0x3c190000 | (v >> 16); /* lui t9, hi16 */
++ tramp[1] = 0x27390000 | (v & 0xffff); /* addiu t9, t9, lo16 */
++ tramp[2] = 0x03200008; /* jr t9 */
++ tramp[3] = 0x00000000; /* nop */
+
+- return 0;
++ return (Elf_Addr) tramp;
+ }
+
+-static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
++static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v)
++{
++ if (is_phys_addr(location))
++ return add_plt_entry_to(&me->arch.phys_plt_offset,
++ me->arch.phys_plt_tbl, v);
++ else
++ return add_plt_entry_to(&me->arch.virt_plt_offset,
++ me->arch.virt_plt_tbl, v);
++
++}
++
++static int set_r_mips_26(struct module *me, u32 *location, u32 ofs, Elf_Addr v)
+ {
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_26 RELArelocation\n",
+@@ -102,17 +315,31 @@ static int apply_r_mips_26_rela(struct m
+ }
+
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+- printk(KERN_ERR
++ v = add_plt_entry(me, location, v + (ofs << 2));
++ if (!v) {
++ printk(KERN_ERR
+ "module %s: relocation overflow\n",
+ me->name);
+- return -ENOEXEC;
++ return -ENOEXEC;
++ }
++ ofs = 0;
+ }
+
+- *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
++ *location = (*location & ~0x03ffffff) | ((ofs + (v >> 2)) & 0x03ffffff);
+
+ return 0;
+ }
+
++static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
++{
++ return set_r_mips_26(me, location, *location & 0x03ffffff, v);
++}
++
++static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
++{
++ return set_r_mips_26(me, location, 0, v);
++}
++
+ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
+ {
+ struct mips_hi16 *n;
+@@ -380,11 +607,32 @@ int module_finalize(const Elf_Ehdr *hdr,
+ list_add(&me->arch.dbe_list, &dbe_list);
+ spin_unlock_irq(&dbe_lock);
+ }
++
++ /* Get rid of the fixup trampoline if we're running the module
++ * from physically mapped address space */
++ if (me->arch.phys_plt_offset == 0) {
++ __module_free(me->arch.phys_plt_tbl);
++ me->arch.phys_plt_tbl = NULL;
++ }
++ if (me->arch.virt_plt_offset == 0) {
++ __module_free(me->arch.virt_plt_tbl);
++ me->arch.virt_plt_tbl = NULL;
++ }
++
+ return 0;
+ }
+
+ void module_arch_cleanup(struct module *mod)
+ {
++ if (mod->arch.phys_plt_tbl) {
++ __module_free(mod->arch.phys_plt_tbl);
++ mod->arch.phys_plt_tbl = NULL;
++ }
++ if (mod->arch.virt_plt_tbl) {
++ __module_free(mod->arch.virt_plt_tbl);
++ mod->arch.virt_plt_tbl = NULL;
++ }
++
+ spin_lock_irq(&dbe_lock);
+ list_del(&mod->arch.dbe_list);
+ spin_unlock_irq(&dbe_lock);
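A note on the trampoline emitted by add_plt_entry_to() above: the lui/addiu pair only reconstructs the target because addiu sign-extends its 16-bit immediate, which is exactly what the "adjust carry" increment compensates for. The stand-alone program below re-checks that arithmetic with a made-up target address; it is not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t target = 0x8040abcd;   /* hypothetical jump destination */
            uint32_t v = target;

            if (v & 0x00008000)             /* low half >= 0x8000: addiu subtracts 0x10000, */
                    v += 0x10000;           /* so pre-add it to the high half */

            uint32_t hi = v >> 16;                      /* encoded into lui t9, hi */
            int32_t  lo = (int16_t)(target & 0xffff);   /* addiu t9, t9, lo (sign-extended) */

            printf("lui 0x%04x + addiu %d = 0x%08x (expected 0x%08x)\n",
                   hi, lo, (hi << 16) + lo, target);
            return 0;
    }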
diff --git a/target/linux/generic/patches-3.3/306-mips_mem_functions_performance.patch b/target/linux/generic/patches-3.3/306-mips_mem_functions_performance.patch
new file mode 100644
index 0000000..e432471
--- /dev/null
+++ b/target/linux/generic/patches-3.3/306-mips_mem_functions_performance.patch
@@ -0,0 +1,83 @@
+--- a/arch/mips/include/asm/string.h
++++ b/arch/mips/include/asm/string.h
+@@ -133,11 +133,44 @@ strncmp(__const__ char *__cs, __const__
+
+ #define __HAVE_ARCH_MEMSET
+ extern void *memset(void *__s, int __c, size_t __count);
++#define memset(__s, __c, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memset((__s), (__c), __len); \
++ else \
++ __ret = __builtin_memset((__s), (__c), __len); \
++ __ret; \
++})
+
+ #define __HAVE_ARCH_MEMCPY
+ extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
++#define memcpy(dst, src, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memcpy((dst), (src), __len); \
++ else \
++ __ret = __builtin_memcpy((dst), (src), __len); \
++ __ret; \
++})
+
+ #define __HAVE_ARCH_MEMMOVE
+ extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
++#define memmove(dst, src, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memmove((dst), (src), __len); \
++ else \
++ __ret = __builtin_memmove((dst), (src), __len); \
++ __ret; \
++})
++
++#define __HAVE_ARCH_MEMCMP
++#define memcmp(src1, src2, len) __builtin_memcmp((src1), (src2), (len))
+
+ #endif /* _ASM_STRING_H */
+--- a/arch/mips/lib/Makefile
++++ b/arch/mips/lib/Makefile
+@@ -3,7 +3,7 @@
+ #
+
+ lib-y += csum_partial.o delay.o memcpy.o memcpy-inatomic.o memset.o \
+- strlen_user.o strncpy_user.o strnlen_user.o uncached.o
++ strlen_user.o strncpy_user.o strnlen_user.o uncached.o memcmp.o
+
+ obj-y += iomap.o
+ obj-$(CONFIG_PCI) += iomap-pci.o
+--- /dev/null
++++ b/arch/mips/lib/memcmp.c
+@@ -0,0 +1,22 @@
++/*
++ * copied from linux/lib/string.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++#include <linux/module.h>
++#include <linux/string.h>
++
++#undef memcmp
++int memcmp(const void *cs, const void *ct, size_t count)
++{
++ const unsigned char *su1, *su2;
++ int res = 0;
++
++ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
++ if ((res = *su1 - *su2) != 0)
++ break;
++ return res;
++}
++EXPORT_SYMBOL(memcmp);
++
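The macros added to <asm/string.h> above send constant lengths of 64 bytes or more to the out-of-line MIPS assembly routines and everything else to the compiler builtins, which can be inlined. The user-space mock-up below reproduces the same dispatch so it is easy to inspect; fast_memcpy() is an invented name, not something the patch defines.

    #include <stdio.h>
    #include <string.h>

    /* Same shape as the kernel macro: constant len >= 64 -> library call,
     * everything else -> builtin (often inlined by the compiler). */
    #define fast_memcpy(dst, src, len)                              \
    ({                                                              \
            size_t __len = (len);                                   \
            void *__ret;                                            \
            if (__builtin_constant_p(len) && __len >= 64)           \
                    __ret = memcpy((dst), (src), __len);            \
            else                                                    \
                    __ret = __builtin_memcpy((dst), (src), __len);  \
            __ret;                                                  \
    })

    int main(void)
    {
            char a[128] = "hello", b[128];

            fast_memcpy(b, a, sizeof(a));   /* constant 128 -> out-of-line memcpy */
            fast_memcpy(b, a, 6);           /* constant 6   -> builtin, usually inlined */
            printf("%s\n", b);
            return 0;
    }

The 64-byte threshold keeps frequent tiny copies free of a function call while still routing large constant-sized blocks to the tuned assembly.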
diff --git a/target/linux/generic/patches-3.3/307-mips_oprofile_fix.patch b/target/linux/generic/patches-3.3/307-mips_oprofile_fix.patch
new file mode 100644
index 0000000..f83c96b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/307-mips_oprofile_fix.patch
@@ -0,0 +1,35 @@
+--- a/arch/mips/oprofile/op_model_mipsxx.c
++++ b/arch/mips/oprofile/op_model_mipsxx.c
+@@ -298,6 +298,11 @@ static void reset_counters(void *arg)
+ }
+ }
+
++static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
++{
++ return mipsxx_perfcount_handler();
++}
++
+ static int __init mipsxx_init(void)
+ {
+ int counters;
+@@ -374,6 +379,10 @@ static int __init mipsxx_init(void)
+ save_perf_irq = perf_irq;
+ perf_irq = mipsxx_perfcount_handler;
+
++ if (cp0_perfcount_irq >= 0)
++ return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
++ IRQF_SHARED, "Perfcounter", save_perf_irq);
++
+ return 0;
+ }
+
+@@ -381,6 +390,9 @@ static void mipsxx_exit(void)
+ {
+ int counters = op_model_mipsxx_ops.num_counters;
+
++ if (cp0_perfcount_irq >= 0)
++ free_irq(cp0_perfcount_irq, save_perf_irq);
++
+ counters = counters_per_cpu_to_total(counters);
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
diff --git a/target/linux/generic/patches-3.3/310-arm_module_unresolved_weak_sym.patch b/target/linux/generic/patches-3.3/310-arm_module_unresolved_weak_sym.patch
new file mode 100644
index 0000000..d1eba55
--- /dev/null
+++ b/target/linux/generic/patches-3.3/310-arm_module_unresolved_weak_sym.patch
@@ -0,0 +1,13 @@
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -81,6 +81,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons
+ return -ENOEXEC;
+ }
+
++ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) &&
++ ELF_ST_BIND(sym->st_info) == STB_WEAK)
++ continue;
++
+ loc = dstsec->sh_addr + rel->r_offset;
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
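The check added above makes the ARM module loader skip relocations against weak symbols that were left unresolved (st_value of 0 or an error value) instead of refusing to load the module. The fragment below shows the module-side pattern this is meant to support; the symbol and function names are invented for illustration.

    /* A weak reference: the module links and loads even if nothing in the
     * kernel or in another module ever provides optional_hook(). */
    extern void optional_hook(int status) __attribute__((weak));

    static void report_status(int status)
    {
            if (optional_hook)      /* NULL when the weak symbol stayed unresolved */
                    optional_hook(status);
    }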
diff --git a/target/linux/generic/patches-3.3/320-ppc4xx_optimization.patch b/target/linux/generic/patches-3.3/320-ppc4xx_optimization.patch
new file mode 100644
index 0000000..3f67f1b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/320-ppc4xx_optimization.patch
@@ -0,0 +1,31 @@
+Upstream doesn't optimize the kernel and bootwrappers for ppc44x because
+they still want to support gcc 3.3 -- well, we don't.
+
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -130,7 +130,8 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
+ KBUILD_CFLAGS += -mno-sched-epilog
+ endif
+
+-cpu-as-$(CONFIG_4xx) += -Wa,-m405
++cpu-as-$(CONFIG_40x) += -Wa,-m405
++cpu-as-$(CONFIG_44x) += -Wa,-m440
+ cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec
+ cpu-as-$(CONFIG_E500) += -Wa,-me500
+ cpu-as-$(CONFIG_E200) += -Wa,-me200
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -38,10 +38,10 @@ BOOTCFLAGS += -I$(obj) -I$(srctree)/$(ob
+ DTC_FLAGS ?= -p 1024
+
+ $(obj)/4xx.o: BOOTCFLAGS += -mcpu=405
+-$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
++$(obj)/ebony.o: BOOTCFLAGS += -mcpu=440
+ $(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405
+-$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
+-$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
++$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=440
++$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=440
+ $(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
+ $(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
+ $(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
diff --git a/target/linux/generic/patches-3.3/321-powerpc_crtsavres_prereq.patch b/target/linux/generic/patches-3.3/321-powerpc_crtsavres_prereq.patch
new file mode 100644
index 0000000..e5d383c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/321-powerpc_crtsavres_prereq.patch
@@ -0,0 +1,56 @@
+--- a/Makefile
++++ b/Makefile
+@@ -375,6 +375,7 @@ KBUILD_AFLAGS := -D__ASSEMBLY__
+ KBUILD_AFLAGS_MODULE := -DMODULE
+ KBUILD_CFLAGS_MODULE := -DMODULE
+ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
++KBUILD_LDFLAGS_MODULE_PREREQ :=
+
+ # Read KERNELRELEASE from include/config/kernel.release (if it exists)
+ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
+@@ -384,7 +385,7 @@ export VERSION PATCHLEVEL SUBLEVEL KERNE
+ export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
+ export CPP AR NM STRIP OBJCOPY OBJDUMP
+ export MAKE AWK GENKSYMS INSTALLKERNEL PERL UTS_MACHINE
+-export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
++export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE KBUILD_LDFLAGS_MODULE_PREREQ CHECK CHECKFLAGS
+
+ export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
+ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -94,7 +94,7 @@ else
+ endif
+ endif
+
+-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
++KBUILD_LDFLAGS_MODULE_PREREQ += arch/powerpc/lib/crtsavres.o
+
+ ifeq ($(CONFIG_TUNE_CELL),y)
+ KBUILD_CFLAGS += $(call cc-option,-mtune=cell)
+--- a/scripts/Makefile.modpost
++++ b/scripts/Makefile.modpost
+@@ -110,7 +110,14 @@ quiet_cmd_cc_o_c = CC $@
+ cmd_cc_o_c = $(CC) $(c_flags) $(KBUILD_CFLAGS_MODULE) $(CFLAGS_MODULE) \
+ -c -o $@ $<
+
+-$(modules:.ko=.mod.o): %.mod.o: %.mod.c FORCE
++quiet_cmd_as_o_S = AS $(quiet_modtag) $@
++cmd_as_o_S = $(CC) $(a_flags) $(AFLAGS_MODULE) -c -o $@ $<
++
++$(KBUILD_LDFLAGS_MODULE_PREREQ): %.o: %.S FORCE
++ $(Q)mkdir -p $(dir $@)
++ $(call if_changed_dep,as_o_S)
++
++$(modules:.ko=.mod.o): %.mod.o: %.mod.c $(KBUILD_LDFLAGS_MODULE_PREREQ) FORCE
+ $(call if_changed_dep,cc_o_c)
+
+ targets += $(modules:.ko=.mod.o)
+@@ -119,6 +126,7 @@ targets += $(modules:.ko=.mod.o)
+ quiet_cmd_ld_ko_o = LD [M] $@
+ cmd_ld_ko_o = $(LD) -r $(LDFLAGS) \
+ $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \
++ $(KBUILD_LDFLAGS_MODULE_PREREQ) \
+ -o $@ $(filter-out FORCE,$^)
+
+ $(modules): %.ko :%.o %.mod.o FORCE
diff --git a/target/linux/generic/patches-3.3/330-mips-add-crash-and-kdump-support.patch b/target/linux/generic/patches-3.3/330-mips-add-crash-and-kdump-support.patch
new file mode 100644
index 0000000..7a79c41
--- /dev/null
+++ b/target/linux/generic/patches-3.3/330-mips-add-crash-and-kdump-support.patch
@@ -0,0 +1,616 @@
+From eee16330c9de9adf7880cce9f1d32e13f89706bb Mon Sep 17 00:00:00 2001
+From: Wu Zhangjin <wuzhangjin@gmail.com>
+Date: Tue, 11 Jan 2011 13:16:47 +0000
+Subject: MIPS: Add crash and kdump support
+
+From: http://patchwork.linux-mips.org/patch/1025/
+
+Hello folks,
+
+Please find here MIPS crash and kdump patches.
+This is patch set of 3 patches:
+1. generic MIPS changes (kernel);
+2. MIPS Cavium Octeon board kexec/kdump code (kernel);
+3. Kexec user space MIPS changes.
+
+Patches were tested on the latest linux-mips@ git kernel and the latest
+kexec-tools git on Cavium Octeon 50xx board.
+
+I also made the same code working on RMI XLR/XLS boards for both
+mips32 and mips64 kernels.
+
+Best regards,
+Maxim Uvarov.
+
+------
+[ Zhangjin: Several trivial build failures have been fixed.
+
+Note: the 2nd patch cannot be applied cleanly, but may be a good
+reference for other board development:
+
+ + MIPS Cavium Octeon board kexec,kdump support
+ http://patchwork.linux-mips.org/patch/1026/
+
+And the 3rd patch has already been merged into the mainline kexec-tools:
+
+ + some kexec MIPS improvements
+ http://patchwork.linux-mips.org/patch/1027/
+
+kexec-tools is available here:
+
+ + http://horms.net/projects/kexec/
+ git://git.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git
+]
+Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
+---
+(limited to 'arch/mips/kernel')
+
+--- a/arch/mips/kernel/Makefile
++++ b/arch/mips/kernel/Makefile
+@@ -97,7 +97,8 @@ obj-$(CONFIG_I8253) += i8253.o
+
+ obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
+
+-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
++obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
++obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+ obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
+ obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
+--- /dev/null
++++ b/arch/mips/kernel/crash.c
+@@ -0,0 +1,75 @@
++#include <linux/kernel.h>
++#include <linux/smp.h>
++#include <linux/reboot.h>
++#include <linux/kexec.h>
++#include <linux/bootmem.h>
++#include <linux/crash_dump.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/irq.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++
++#ifdef CONFIG_CRASH_DUMP
++unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
++#endif
++
++/* This keeps a track of which one is crashing cpu. */
++int crashing_cpu = -1;
++static cpumask_t cpus_in_crash = CPU_MASK_NONE;
++
++#ifdef CONFIG_SMP
++void crash_shutdown_secondary(void *ignore)
++{
++ struct pt_regs *regs;
++ int cpu = smp_processor_id();
++
++ regs = task_pt_regs(current);
++
++ if (!cpu_online(cpu))
++ return;
++
++ local_irq_disable();
++ if (!cpu_isset(cpu, cpus_in_crash))
++ crash_save_cpu(regs, cpu);
++ cpu_set(cpu, cpus_in_crash);
++
++ while (!atomic_read(&kexec_ready_to_reboot))
++ cpu_relax();
++ relocated_kexec_smp_wait(NULL);
++ /* NOTREACHED */
++}
++
++static void crash_kexec_prepare_cpus(void)
++{
++ unsigned int msecs;
++
++ unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
++
++ dump_send_ipi(crash_shutdown_secondary);
++ smp_wmb();
++
++ /*
++ * The crash CPU sends an IPI and wait for other CPUs to
++ * respond. Delay of at least 10 seconds.
++ */
++ printk(KERN_EMERG "Sending IPI to other cpus...\n");
++ msecs = 10000;
++ while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
++ cpu_relax();
++ mdelay(1);
++ }
++}
++
++#else
++static void crash_kexec_prepare_cpus(void) {}
++#endif
++
++void default_machine_crash_shutdown(struct pt_regs *regs)
++{
++ local_irq_disable();
++ crashing_cpu = smp_processor_id();
++ crash_save_cpu(regs, crashing_cpu);
++ crash_kexec_prepare_cpus();
++ cpu_set(crashing_cpu, cpus_in_crash);
++}
+--- /dev/null
++++ b/arch/mips/kernel/crash_dump.c
+@@ -0,0 +1,86 @@
++#include <linux/highmem.h>
++#include <linux/bootmem.h>
++#include <linux/crash_dump.h>
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_PROC_VMCORE
++static int __init parse_elfcorehdr(char *p)
++{
++ if (p)
++ elfcorehdr_addr = memparse(p, &p);
++ return 1;
++}
++__setup("elfcorehdr=", parse_elfcorehdr);
++#endif
++
++static int __init parse_savemaxmem(char *p)
++{
++ if (p)
++ saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
++
++ return 1;
++}
++__setup("savemaxmem=", parse_savemaxmem);
++
++
++static void *kdump_buf_page;
++
++/**
++ * copy_oldmem_page - copy one page from "oldmem"
++ * @pfn: page frame number to be copied
++ * @buf: target memory address for the copy; this can be in kernel address
++ * space or user address space (see @userbuf)
++ * @csize: number of bytes to copy
++ * @offset: offset in bytes into the page (based on pfn) to begin the copy
++ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
++ * otherwise @buf is in kernel address space, use memcpy().
++ *
++ * Copy a page from "oldmem". For this page, there is no pte mapped
++ * in the current kernel.
++ *
++ * Calling copy_to_user() in atomic context is not desirable. Hence first
++ * copying the data to a pre-allocated kernel page and then copying to user
++ * space in non-atomic context.
++ */
++ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
++ size_t csize, unsigned long offset, int userbuf)
++{
++ void *vaddr;
++
++ if (!csize)
++ return 0;
++
++ vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
++
++ if (!userbuf) {
++ memcpy(buf, (vaddr + offset), csize);
++ kunmap_atomic(vaddr, KM_PTE0);
++ } else {
++ if (!kdump_buf_page) {
++ printk(KERN_WARNING "Kdump: Kdump buffer page not"
++ " allocated\n");
++ return -EFAULT;
++ }
++ copy_page(kdump_buf_page, vaddr);
++ kunmap_atomic(vaddr, KM_PTE0);
++ if (copy_to_user(buf, (kdump_buf_page + offset), csize))
++ return -EFAULT;
++ }
++
++ return csize;
++}
++
++static int __init kdump_buf_page_init(void)
++{
++ int ret = 0;
++
++ kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
++ if (!kdump_buf_page) {
++ printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
++ " page\n");
++ ret = -ENOMEM;
++ }
++
++ return ret;
++}
++arch_initcall(kdump_buf_page_init);
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -19,9 +19,19 @@ extern const size_t relocate_new_kernel_
+ extern unsigned long kexec_start_address;
+ extern unsigned long kexec_indirection_page;
+
++int (*_machine_kexec_prepare)(struct kimage *) = NULL;
++void (*_machine_kexec_shutdown)(void) = NULL;
++void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
++#ifdef CONFIG_SMP
++void (*relocated_kexec_smp_wait) (void *);
++atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
++#endif
++
+ int
+ machine_kexec_prepare(struct kimage *kimage)
+ {
++ if (_machine_kexec_prepare)
++ return _machine_kexec_prepare(kimage);
+ return 0;
+ }
+
+@@ -33,11 +43,17 @@ machine_kexec_cleanup(struct kimage *kim
+ void
+ machine_shutdown(void)
+ {
++ if (_machine_kexec_shutdown)
++ _machine_kexec_shutdown();
+ }
+
+ void
+ machine_crash_shutdown(struct pt_regs *regs)
+ {
++ if (_machine_crash_shutdown)
++ _machine_crash_shutdown(regs);
++ else
++ default_machine_crash_shutdown(regs);
+ }
+
+ typedef void (*noretfun_t)(void) __attribute__((noreturn));
+@@ -52,7 +68,9 @@ machine_kexec(struct kimage *image)
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
+
+- kexec_start_address = (unsigned long) phys_to_virt(image->start);
++ kexec_start_address =
++ (unsigned long) phys_to_virt(image->start);
++
+ kexec_indirection_page =
+ (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+
+@@ -63,7 +81,7 @@ machine_kexec(struct kimage *image)
+ * The generic kexec code builds a page list with physical
+ * addresses. they are directly accessible through KSEG0 (or
+ * CKSEG0 or XPHYS if on 64bit system), hence the
+- * pys_to_virt() call.
++ * phys_to_virt() call.
+ */
+ for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE);
+ ptr = (entry & IND_INDIRECTION) ?
+@@ -81,5 +99,13 @@ machine_kexec(struct kimage *image)
+ printk("Will call new kernel at %08lx\n", image->start);
+ printk("Bye ...\n");
+ __flush_cache_all();
++#ifdef CONFIG_SMP
++ /* All secondary cpus now may jump to kexec_wait cycle */
++ relocated_kexec_smp_wait = reboot_code_buffer +
++ (void *)(kexec_smp_wait - relocate_new_kernel);
++ smp_wmb();
++ atomic_set(&kexec_ready_to_reboot, 1);
++#endif
+ ((noretfun_t) reboot_code_buffer)();
+ }
++
+--- a/arch/mips/kernel/relocate_kernel.S
++++ b/arch/mips/kernel/relocate_kernel.S
+@@ -15,6 +15,11 @@
+ #include <asm/addrspace.h>
+
+ LEAF(relocate_new_kernel)
++ PTR_L a0, arg0
++ PTR_L a1, arg1
++ PTR_L a2, arg2
++ PTR_L a3, arg3
++
+ PTR_L s0, kexec_indirection_page
+ PTR_L s1, kexec_start_address
+
+@@ -26,7 +31,6 @@ process_entry:
+ and s3, s2, 0x1
+ beq s3, zero, 1f
+ and s4, s2, ~0x1 /* store destination addr in s4 */
+- move a0, s4
+ b process_entry
+
+ 1:
+@@ -60,23 +64,100 @@ copy_word:
+ b process_entry
+
+ done:
++#ifdef CONFIG_SMP
++	/* The kexec_flag reset signals the other CPUs that the kernel
++	   has been moved to its location. Note - we need the relocated address
++	   of kexec_flag. */
++
++ bal 1f
++ 1: move t1,ra;
++ PTR_LA t2,1b
++ PTR_LA t0,kexec_flag
++ PTR_SUB t0,t0,t2;
++ PTR_ADD t0,t1,t0;
++ LONG_S zero,(t0)
++#endif
++
++ sync
+ /* jump to kexec_start_address */
+ j s1
+ END(relocate_new_kernel)
+
+-kexec_start_address:
+- EXPORT(kexec_start_address)
++#ifdef CONFIG_SMP
++/*
++ * Other CPUs should wait until code is relocated and
++ * then start at entry (?) point.
++ */
++LEAF(kexec_smp_wait)
++ PTR_L a0, s_arg0
++ PTR_L a1, s_arg1
++ PTR_L a2, s_arg2
++ PTR_L a3, s_arg3
++ PTR_L s1, kexec_start_address
++
++ /* Non-relocated address works for args and kexec_start_address ( old
++ * kernel is not overwritten). But we need relocated address of
++ * kexec_flag.
++ */
++
++ bal 1f
++1: move t1,ra;
++ PTR_LA t2,1b
++ PTR_LA t0,kexec_flag
++ PTR_SUB t0,t0,t2;
++ PTR_ADD t0,t1,t0;
++
++1: LONG_L s0, (t0)
++ bne s0, zero,1b
++
++ sync
++ j s1
++ END(kexec_smp_wait)
++#endif
++
++#ifdef __mips64
++ /* all PTR's must be aligned to 8 byte in 64-bit mode */
++ .align 3
++#endif
++
++/* All parameters to new kernel are passed in registers a0-a3.
++ * kexec_args[0..3] are uses to prepare register values.
++ */
++
++EXPORT(kexec_args)
++arg0: PTR 0x0
++arg1: PTR 0x0
++arg2: PTR 0x0
++arg3: PTR 0x0
++ .size kexec_args,PTRSIZE*4
++
++#ifdef CONFIG_SMP
++/*
++ * Secondary CPUs may have different kernel parameters in
++ * their registers a0-a3. secondary_kexec_args[0..3] are used
++ * to prepare register values.
++ */
++EXPORT(secondary_kexec_args)
++s_arg0: PTR 0x0
++s_arg1: PTR 0x0
++s_arg2: PTR 0x0
++s_arg3: PTR 0x0
++ .size secondary_kexec_args,PTRSIZE*4
++kexec_flag:
++ LONG 0x1
++
++#endif
++
++EXPORT(kexec_start_address)
+ PTR 0x0
+ .size kexec_start_address, PTRSIZE
+
+-kexec_indirection_page:
+- EXPORT(kexec_indirection_page)
++EXPORT(kexec_indirection_page)
+ PTR 0
+ .size kexec_indirection_page, PTRSIZE
+
+ relocate_new_kernel_end:
+
+-relocate_new_kernel_size:
+- EXPORT(relocate_new_kernel_size)
++EXPORT(relocate_new_kernel_size)
+ PTR relocate_new_kernel_end - relocate_new_kernel
+ .size relocate_new_kernel_size, PTRSIZE
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -22,6 +22,7 @@
+ #include <linux/console.h>
+ #include <linux/pfn.h>
+ #include <linux/debugfs.h>
++#include <linux/kexec.h>
+
+ #include <asm/addrspace.h>
+ #include <asm/bootinfo.h>
+@@ -523,12 +524,62 @@ static void __init arch_mem_init(char **
+ }
+
+ bootmem_init();
++#ifdef CONFIG_KEXEC
++ if (crashk_res.start != crashk_res.end)
++ reserve_bootmem(crashk_res.start,
++ crashk_res.end - crashk_res.start + 1,
++ BOOTMEM_DEFAULT);
++#endif
+ device_tree_init();
+ sparse_init();
+ plat_swiotlb_setup();
+ paging_init();
+ }
+
++#ifdef CONFIG_KEXEC
++static inline unsigned long long get_total_mem(void)
++{
++ unsigned long long total;
++ total = max_pfn - min_low_pfn;
++ return total << PAGE_SHIFT;
++}
++
++static void __init mips_parse_crashkernel(void)
++{
++ unsigned long long total_mem;
++ unsigned long long crash_size, crash_base;
++ int ret;
++
++ total_mem = get_total_mem();
++ ret = parse_crashkernel(boot_command_line, total_mem,
++ &crash_size, &crash_base);
++ if (ret != 0 || crash_size <= 0)
++ return;
++
++ crashk_res.start = crash_base;
++ crashk_res.end = crash_base + crash_size - 1;
++}
++static void __init request_crashkernel(struct resource *res)
++{
++ int ret;
++
++ ret = request_resource(res, &crashk_res);
++ if (!ret)
++ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
++ "for crashkernel\n",
++ (unsigned long)((crashk_res.end -
++ crashk_res.start + 1) >> 20),
++ (unsigned long)(crashk_res.start >> 20));
++}
++#else
++static void __init mips_parse_crashkernel(void)
++{
++}
++static void __init request_crashkernel(struct resource *res)
++{
++}
++#endif
++
+ static void __init resource_init(void)
+ {
+ int i;
+@@ -544,6 +595,8 @@ static void __init resource_init(void)
+ /*
+ * Request address space for all standard RAM.
+ */
++ mips_parse_crashkernel();
++
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ struct resource *res;
+ unsigned long start, end;
+@@ -580,6 +633,7 @@ static void __init resource_init(void)
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
++ request_crashkernel(res);
+ }
+ }
+
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -433,3 +433,21 @@ void flush_tlb_one(unsigned long vaddr)
+
+ EXPORT_SYMBOL(flush_tlb_page);
+ EXPORT_SYMBOL(flush_tlb_one);
++
++#if defined(CONFIG_KEXEC)
++void (*dump_ipi_function_ptr)(void *) = NULL;
++void dump_send_ipi(void (*dump_ipi_callback)(void *))
++{
++ int i;
++ int cpu = smp_processor_id();
++
++ dump_ipi_function_ptr = dump_ipi_callback;
++ smp_mb();
++ for_each_online_cpu(i)
++ if (i != cpu)
++ core_send_ipi(i, SMP_DUMP);
++
++}
++EXPORT_SYMBOL(dump_send_ipi);
++#endif
++
+--- a/arch/mips/include/asm/kexec.h
++++ b/arch/mips/include/asm/kexec.h
+@@ -9,22 +9,45 @@
+ #ifndef _MIPS_KEXEC
+ # define _MIPS_KEXEC
+
++#include <asm/stacktrace.h>
++
++extern unsigned long long elfcorehdr_addr;
++
+ /* Maximum physical address we can use pages from */
+ #define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000)
+ /* Maximum address we can reach in physical address mode */
+ #define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000)
+ /* Maximum address we can use for the control code buffer */
+ #define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
+-
+-#define KEXEC_CONTROL_PAGE_SIZE 4096
++/* Reserve 3*4096 bytes for board-specific info */
++#define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096)
+
+ /* The native architecture */
+ #define KEXEC_ARCH KEXEC_ARCH_MIPS
++#define MAX_NOTE_BYTES 1024
+
+ static inline void crash_setup_regs(struct pt_regs *newregs,
+- struct pt_regs *oldregs)
++ struct pt_regs *oldregs)
+ {
+- /* Dummy implementation for now */
++ if (oldregs)
++ memcpy(newregs, oldregs, sizeof(*newregs));
++ else
++ prepare_frametrace(newregs);
+ }
+
++#ifdef CONFIG_KEXEC
++struct kimage;
++extern unsigned long kexec_args[4];
++extern int (*_machine_kexec_prepare)(struct kimage *);
++extern void (*_machine_kexec_shutdown)(void);
++extern void (*_machine_crash_shutdown)(struct pt_regs *regs);
++extern void default_machine_crash_shutdown(struct pt_regs *regs);
++#ifdef CONFIG_SMP
++extern const unsigned char kexec_smp_wait[];
++extern unsigned long secondary_kexec_args[4];
++extern void (*relocated_kexec_smp_wait) (void *);
++extern atomic_t kexec_ready_to_reboot;
++#endif
++#endif
++
+ #endif /* !_MIPS_KEXEC */
+--- a/arch/mips/include/asm/smp.h
++++ b/arch/mips/include/asm/smp.h
+@@ -40,6 +40,8 @@ extern int __cpu_logical_map[NR_CPUS];
+ #define SMP_CALL_FUNCTION 0x2
+ /* Octeon - Tell another core to flush its icache */
+ #define SMP_ICACHE_FLUSH 0x4
++/* Used by kexec crashdump to save all cpu's state */
++#define SMP_DUMP 0x8
+
+ extern volatile cpumask_t cpu_callin_map;
+
+@@ -91,4 +93,9 @@ static inline void arch_send_call_functi
+ mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
+ }
+
++extern void core_send_ipi(int cpu, unsigned int action);
++#if defined(CONFIG_KEXEC)
++extern void (*dump_ipi_function_ptr)(void *);
++void dump_send_ipi(void (*dump_ipi_callback)(void *));
++#endif
+ #endif /* __ASM_SMP_H */
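parse_elfcorehdr() and parse_savemaxmem() in the new crash_dump.c both rely on memparse() to accept values such as "64M" or "0x4000000" from the kernel command line. The user-space approximation below shows roughly what that helper does; it is deliberately simplified (K/M/G suffixes only, no overflow handling) and is not the kernel implementation.

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long long memparse_sketch(const char *p, char **endp)
    {
            unsigned long long v = strtoull(p, endp, 0);    /* base 0: decimal, octal or 0x... */

            switch (**endp) {
            case 'G': case 'g': v <<= 10;   /* fall through */
            case 'M': case 'm': v <<= 10;   /* fall through */
            case 'K': case 'k': v <<= 10; (*endp)++; break;
            }
            return v;
    }

    int main(void)
    {
            char *end;

            printf("%llu\n", memparse_sketch("64M", &end));       /* 67108864 */
            printf("%llu\n", memparse_sketch("0x4000000", &end)); /* 67108864 */
            return 0;
    }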
diff --git a/target/linux/generic/patches-3.3/331-mips-kexec-enhanche-the-support.patch b/target/linux/generic/patches-3.3/331-mips-kexec-enhanche-the-support.patch
new file mode 100644
index 0000000..5ffc2e29
--- /dev/null
+++ b/target/linux/generic/patches-3.3/331-mips-kexec-enhanche-the-support.patch
@@ -0,0 +1,159 @@
+From 03cd81fbca6b91317ec1a7b3b3c09fb8d08f83a6 Mon Sep 17 00:00:00 2001
+From: Wu Zhangjin <wuzhangjin@gmail.com>
+Date: Tue, 11 Jan 2011 18:42:08 +0000
+Subject: MIPS: Kexec: Enhance the support
+
+Changes:
+ o Print more information in machine_kexec() for debugging
+   E.g. with this information, kexec_start_address was found to be
+   wrong with a 64bit kernel / o32 kexec-tools, which must be
+   fixed later.
+ o Link relocate_kernel.S to a section for future extension
+   This allows more functions to be added to the kexec relocation
+   part, even ones written in C. To add code to that section, you just need
+ to mark your function or data with __kexec or
+ __attribute__((__section__(".__kexec.relocate")))
+
+TODO:
+
+1. Make 64bit kernel / o32|n32|64 kexec-tools works
+
+Fix the user-space kexec-tools; it seems the tool only works for 32bit
+machines, so we need to add 64bit support for it. The address of the
+entry point (kexec_start_address) is wrong and makes "kexec -e" fail;
+the real entry point must be read from the new kernel image by the
+user-space kexec-tools, otherwise it will not work. The 64bit setup
+tested above is a 64bit kernel with o32 user-space kexec-tools. The root
+cause may be the different definition of virt_to_phys() and
+phys_to_virt() in the kexec-tools and kernel space for 64bit system /
+o32 kernel.
+
+Ref: http://www.linux-mips.org/archives/linux-mips/2009-08/msg00149.html
+
+2. Pass the arguments from kexec-tools to the new kernel image
+
+Please refer to: "MIPS: Loongson: Kexec: Pass parameters to new kernel"
+
+Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
+---
+--- a/arch/mips/include/asm/kexec.h
++++ b/arch/mips/include/asm/kexec.h
+@@ -36,6 +36,16 @@ static inline void crash_setup_regs(stru
+ }
+
+ #ifdef CONFIG_KEXEC
++
++#define __kexec __attribute__((__section__(".__kexec.relocate")))
++
++/* The linker tells us where the relocate_new_kernel part is. */
++extern const unsigned char __start___kexec_relocate;
++extern const unsigned char __end___kexec_relocate;
++
++extern unsigned long kexec_start_address;
++extern unsigned long kexec_indirection_page;
++
+ struct kimage;
+ extern unsigned long kexec_args[4];
+ extern int (*_machine_kexec_prepare)(struct kimage *);
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -13,12 +13,6 @@
+ #include <asm/cacheflush.h>
+ #include <asm/page.h>
+
+-extern const unsigned char relocate_new_kernel[];
+-extern const size_t relocate_new_kernel_size;
+-
+-extern unsigned long kexec_start_address;
+-extern unsigned long kexec_indirection_page;
+-
+ int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+ void (*_machine_kexec_shutdown)(void) = NULL;
+ void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+@@ -61,21 +55,34 @@ typedef void (*noretfun_t)(void) __attri
+ void
+ machine_kexec(struct kimage *image)
+ {
++ unsigned long kexec_relocate_size;
+ unsigned long reboot_code_buffer;
+ unsigned long entry;
+ unsigned long *ptr;
+
++ kexec_relocate_size = (unsigned long)(&__end___kexec_relocate) -
++ (unsigned long)(&__start___kexec_relocate);
++ pr_info("kexec_relocate_size = %lu\n", kexec_relocate_size);
++
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
++ pr_info("reboot_code_buffer = %p\n", (void *)reboot_code_buffer);
+
+ kexec_start_address =
+ (unsigned long) phys_to_virt(image->start);
++ pr_info("kexec_start_address(entry point of new kernel) = %p\n",
++ (void *)kexec_start_address);
+
+ kexec_indirection_page =
+ (unsigned long) phys_to_virt(image->head & PAGE_MASK);
++ pr_info("kexec_indirection_page = %p\n",
++ (void *)kexec_indirection_page);
+
+- memcpy((void*)reboot_code_buffer, relocate_new_kernel,
+- relocate_new_kernel_size);
++ memcpy((void *)reboot_code_buffer, &__start___kexec_relocate,
++ kexec_relocate_size);
++
++ pr_info("Copy kexec_relocate section from %p to reboot_code_buffer: %p\n",
++ &__start___kexec_relocate, (void *)reboot_code_buffer);
+
+ /*
+ * The generic kexec code builds a page list with physical
+@@ -96,8 +103,8 @@ machine_kexec(struct kimage *image)
+ */
+ local_irq_disable();
+
+- printk("Will call new kernel at %08lx\n", image->start);
+- printk("Bye ...\n");
++ pr_info("Will call new kernel at %p\n", (void *)kexec_start_address);
++ pr_info("Bye ...\n");
+ __flush_cache_all();
+ #ifdef CONFIG_SMP
+ /* All secondary cpus now may jump to kexec_wait cycle */
+@@ -108,4 +115,3 @@ machine_kexec(struct kimage *image)
+ #endif
+ ((noretfun_t) reboot_code_buffer)();
+ }
+-
+--- a/arch/mips/kernel/relocate_kernel.S
++++ b/arch/mips/kernel/relocate_kernel.S
+@@ -14,6 +14,8 @@
+ #include <asm/stackframe.h>
+ #include <asm/addrspace.h>
+
++ .section .kexec.relocate, "ax"
++
+ LEAF(relocate_new_kernel)
+ PTR_L a0, arg0
+ PTR_L a1, arg1
+@@ -155,9 +157,3 @@ EXPORT(kexec_start_address)
+ EXPORT(kexec_indirection_page)
+ PTR 0
+ .size kexec_indirection_page, PTRSIZE
+-
+-relocate_new_kernel_end:
+-
+-EXPORT(relocate_new_kernel_size)
+- PTR relocate_new_kernel_end - relocate_new_kernel
+- .size relocate_new_kernel_size, PTRSIZE
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -50,6 +50,10 @@ SECTIONS
+ *(.text.*)
+ *(.fixup)
+ *(.gnu.warning)
++ __start___kexec_relocate = .;
++ KEEP(*(.kexec.relocate))
++ KEEP(*(.__kexec.relocate))
++ __end___kexec_relocate = .;
+ } :text = 0
+ _etext = .; /* End of text section */
+
diff --git a/target/linux/generic/patches-3.3/332-mips-kexec-init-the-arguments-for-the-new-kernel-image.patch b/target/linux/generic/patches-3.3/332-mips-kexec-init-the-arguments-for-the-new-kernel-image.patch
new file mode 100644
index 0000000..5507dde
--- /dev/null
+++ b/target/linux/generic/patches-3.3/332-mips-kexec-init-the-arguments-for-the-new-kernel-image.patch
@@ -0,0 +1,52 @@
+From 49d07a29653b1f2c6ae273b3d8fe93d981f43004 Mon Sep 17 00:00:00 2001
+From: Wu Zhangjin <wuzhangjin@gmail.com>
+Date: Wed, 12 Jan 2011 20:59:32 +0000
+Subject: MIPS: Kexec: Init the arguments for the new kernel image
+
+Whenever kexec-tools passes command lines to the new kernel image,
+initialize the arguments to the ones of the 1st kernel image. This fixes
+the boot failure of Kexec on the YeeLoong.
+
+Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
+---
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -10,6 +10,7 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+
++#include <asm/bootinfo.h>
+ #include <asm/cacheflush.h>
+ #include <asm/page.h>
+
+@@ -21,9 +22,30 @@ void (*relocated_kexec_smp_wait) (void *
+ atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+ #endif
+
++static void machine_kexec_init_args(void)
++{
++ kexec_args[0] = fw_arg0;
++ kexec_args[1] = fw_arg1;
++ kexec_args[2] = fw_arg2;
++ kexec_args[3] = fw_arg3;
++
++ pr_info("kexec_args[0] (argc): %lu\n", kexec_args[0]);
++ pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
++ pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
++ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
++}
++
+ int
+ machine_kexec_prepare(struct kimage *kimage)
+ {
++ /*
++	 * Whenever arguments are passed from kexec-tools, init them to
++	 * the original ones to avoid a boot failure.
++	 *
++	 * This can be overridden by _machine_kexec_prepare().
++ */
++ machine_kexec_init_args();
++
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
+ return 0;
diff --git a/target/linux/generic/patches-3.3/333-mips-kexec-get-kernel-parameters-from-kexec-tools.patch b/target/linux/generic/patches-3.3/333-mips-kexec-get-kernel-parameters-from-kexec-tools.patch
new file mode 100644
index 0000000..9da9363
--- /dev/null
+++ b/target/linux/generic/patches-3.3/333-mips-kexec-get-kernel-parameters-from-kexec-tools.patch
@@ -0,0 +1,88 @@
+From 240c76841b26f1b09aaced33414ee1d08b6454cf Mon Sep 17 00:00:00 2001
+From: Wu Zhangjin <wuzhangjin@gmail.com>
+Date: Sat, 15 Jan 2011 12:46:03 +0000
+Subject: MIPS: Get kernel parameters from kexec-tools
+
+Before, we simply used the command line from the original bootloader,
+which is not convenient. Now, we accept the kernel parameters from the
+--command-line or --append option of kexec-tools. If no
+--command-line or --append option is given, we fall back to using the
+ones from the original bootloader.
+
+Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
+---
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -13,6 +13,7 @@
+ #include <asm/bootinfo.h>
+ #include <asm/cacheflush.h>
+ #include <asm/page.h>
++#include <asm/uaccess.h>
+
+ int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+ void (*_machine_kexec_shutdown)(void) = NULL;
+@@ -35,6 +36,56 @@ static void machine_kexec_init_args(void
+ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
+ }
+
++#define ARGV_MAX_ARGS (COMMAND_LINE_SIZE / 15)
++
++int machine_kexec_pass_args(struct kimage *image)
++{
++ int i, argc = 0;
++ char *bootloader = "kexec";
++ int *kexec_argv = (int *)kexec_args[1];
++
++ for (i = 0; i < image->nr_segments; i++) {
++ if (!strncmp(bootloader, (char *)image->segment[i].buf,
++ strlen(bootloader))) {
++ /*
++ * convert command line string to array
++ * of parameters (as bootloader does).
++ */
++ /*
++ * Note: we do treat the 1st string "kexec" as an
++ * argument ;-) so, argc here is 1.
++ */
++ char *str = (char *)image->segment[i].buf;
++ char *ptr = strchr(str, ' ');
++ char *kbuf = (char *)kexec_argv[0];
++ /* Whenever --command-line or --append used, "kexec" is copied */
++ argc = 1;
++ /* Parse the offset */
++ while (ptr && (ARGV_MAX_ARGS > argc)) {
++ *ptr = '\0';
++ if (ptr[1] != ' ' && ptr[1] != '\0') {
++ int offt = (int)(ptr - str + 1);
++ kexec_argv[argc] = (int)kbuf + offt;
++ argc++;
++ }
++ ptr = strchr(ptr + 1, ' ');
++ }
++ if (argc > 1) {
++ /* Copy to kernel space */
++ copy_from_user(kbuf, (char *)image->segment[i].buf, image->segment[i].bufsz);
++ fw_arg0 = kexec_args[0] = argc;
++ }
++ break;
++ }
++ }
++
++ pr_info("argc = %lu\n", kexec_args[0]);
++ for (i = 0; i < kexec_args[0]; i++)
++ pr_info("argv[%d] = %p, %s\n", i, (char *)kexec_argv[i], (char *)kexec_argv[i]);
++
++ return 0;
++}
++
+ int
+ machine_kexec_prepare(struct kimage *kimage)
+ {
+@@ -45,6 +96,7 @@ machine_kexec_prepare(struct kimage *kim
+ * This can be overrided by _machine_kexec_prepare().
+ */
+ machine_kexec_init_args();
++ machine_kexec_pass_args(kimage);
+
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
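machine_kexec_pass_args() above looks for a segment whose buffer begins with the literal string "kexec" and then splits it in place on spaces, recording an offset for every argument. The user-space program below isolates that tokenizing loop so it can be tested on its own; the command line content is of course hypothetical.

    #include <stdio.h>
    #include <string.h>

    #define MAX_ARGS 16

    int main(void)
    {
            char buf[] = "kexec console=ttyS0,115200 root=/dev/mtdblock2";
            char *argv[MAX_ARGS];
            char *ptr = strchr(buf, ' ');
            int argc = 1;                   /* "kexec" itself counts, as in the patch */

            argv[0] = buf;
            while (ptr && argc < MAX_ARGS) {
                    *ptr = '\0';            /* terminate the previous argument */
                    if (ptr[1] != ' ' && ptr[1] != '\0')
                            argv[argc++] = ptr + 1;
                    ptr = strchr(ptr + 1, ' ');
            }

            for (int i = 0; i < argc; i++)
                    printf("argv[%d] = %s\n", i, argv[i]);
            return 0;
    }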
diff --git a/target/linux/generic/patches-3.3/334-mips-fix-compiling-failure-of-relocate_kernel.patch b/target/linux/generic/patches-3.3/334-mips-fix-compiling-failure-of-relocate_kernel.patch
new file mode 100644
index 0000000..46a7395
--- /dev/null
+++ b/target/linux/generic/patches-3.3/334-mips-fix-compiling-failure-of-relocate_kernel.patch
@@ -0,0 +1,83 @@
+From 4aded085fa0057a9a1e1dcec631f950307360c1f Mon Sep 17 00:00:00 2001
+From: Wu Zhangjin <wuzhangjin@gmail.com>
+Date: Tue, 11 Jan 2011 13:46:19 +0000
+Subject: MIPS: Fix compiling failure of relocate_kernel.S
+
+The following error is fixed with the help of <asm/asm_nosec.h>. Since
+this file needs to put different symbols in the same section, the
+original LEAF, NESTED and EXPORT macros (without explicit section
+indication) must be used; <asm/asm_nosec.h> provides them.
+
+arch/mips/kernel/relocate_kernel.S: Assembler messages:
+arch/mips/kernel/relocate_kernel.S:162: Error: operation combines symbols in different segments
+
+Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
+---
+(limited to 'arch/mips/kernel')
+
+--- a/arch/mips/kernel/relocate_kernel.S
++++ b/arch/mips/kernel/relocate_kernel.S
+@@ -7,6 +7,7 @@
+ */
+
+ #include <asm/asm.h>
++#include <asm/asm_nosec.h>
+ #include <asm/asmmacro.h>
+ #include <asm/regdef.h>
+ #include <asm/page.h>
+--- /dev/null
++++ b/arch/mips/include/asm/asm_nosec.h
+@@ -0,0 +1,53 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle
++ * Copyright (C) 1999 by Silicon Graphics, Inc.
++ * Copyright (C) 2001 MIPS Technologies, Inc.
++ * Copyright (C) 2002 Maciej W. Rozycki
++ * Copyright (C) 2010 Wu Zhangjin <wuzhangjin@gmail.com>
++ *
++ * Derive from <asm/asm.h>
++ *
++ * Override the macros without -ffunction-sections and -fdata-sections support.
++ * If several functions or data must be put in the same section, please include
++ * this header file after the <asm/asm.h> to override the generic definition.
++ */
++
++#ifndef __ASM_ASM_NOSEC_H
++#define __ASM_ASM_NOSEC_H
++
++#undef LEAF
++#undef NESTED
++#undef EXPORT
++
++/*
++ * LEAF - declare leaf routine
++ */
++#define LEAF(symbol) \
++ .globl symbol; \
++ .align 2; \
++ .type symbol, @function; \
++ .ent symbol, 0; \
++symbol: .frame sp, 0, ra
++
++/*
++ * NESTED - declare nested routine entry point
++ */
++#define NESTED(symbol, framesize, rpc) \
++ .globl symbol; \
++ .align 2; \
++ .type symbol, @function; \
++ .ent symbol, 0; \
++symbol: .frame sp, framesize, rpc
++
++/*
++ * EXPORT - export definition of symbol
++ */
++#define EXPORT(symbol) \
++ .globl symbol; \
++symbol:
++
++#endif /* __ASM_ASM_NOSEC_H */
diff --git a/target/linux/generic/patches-3.3/335-mips-kexec-cleanup-kexec-tools-parameter-handling.patch b/target/linux/generic/patches-3.3/335-mips-kexec-cleanup-kexec-tools-parameter-handling.patch
new file mode 100644
index 0000000..abc8971
--- /dev/null
+++ b/target/linux/generic/patches-3.3/335-mips-kexec-cleanup-kexec-tools-parameter-handling.patch
@@ -0,0 +1,186 @@
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -23,67 +23,104 @@ void (*relocated_kexec_smp_wait) (void *
+ atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+ #endif
+
+-static void machine_kexec_init_args(void)
++#define KEXEC_MIPS_ARGV_BUF_SIZE COMMAND_LINE_SIZE
++#define KEXEC_MIPS_ARGV_MAX_ARGS (COMMAND_LINE_SIZE / 15)
++
++char kexec_argv_buf[KEXEC_MIPS_ARGV_BUF_SIZE] __kexec;
++int kexec_argv[KEXEC_MIPS_ARGV_MAX_ARGS] __kexec;
++
++static void
++machine_kexec_print_args(void)
+ {
+- kexec_args[0] = fw_arg0;
+- kexec_args[1] = fw_arg1;
+- kexec_args[2] = fw_arg2;
+- kexec_args[3] = fw_arg3;
++ int i;
+
+ pr_info("kexec_args[0] (argc): %lu\n", kexec_args[0]);
+ pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
+ pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
+ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
+-}
+
+-#define ARGV_MAX_ARGS (COMMAND_LINE_SIZE / 15)
++ for (i = 0; i < kexec_args[0]; i++)
++ pr_info("kexec_argv[%d] = %p, %s\n", i,
++ (char *)kexec_argv[i], (char *)kexec_argv[i]);
++}
+
+-int machine_kexec_pass_args(struct kimage *image)
++static void
++machine_kexec_init_argv(struct kimage *image)
+ {
+- int i, argc = 0;
+- char *bootloader = "kexec";
+- int *kexec_argv = (int *)kexec_args[1];
++ void __user *buf;
++ size_t bufsz;
++ size_t size;
++ int i;
+
++ bufsz = 0;
+ for (i = 0; i < image->nr_segments; i++) {
+- if (!strncmp(bootloader, (char *)image->segment[i].buf,
+- strlen(bootloader))) {
+- /*
+- * convert command line string to array
+- * of parameters (as bootloader does).
+- */
+- /*
+- * Note: we do treat the 1st string "kexec" as an
+- * argument ;-) so, argc here is 1.
+- */
+- char *str = (char *)image->segment[i].buf;
+- char *ptr = strchr(str, ' ');
+- char *kbuf = (char *)kexec_argv[0];
+- /* Whenever --command-line or --append used, "kexec" is copied */
+- argc = 1;
+- /* Parse the offset */
+- while (ptr && (ARGV_MAX_ARGS > argc)) {
+- *ptr = '\0';
+- if (ptr[1] != ' ' && ptr[1] != '\0') {
+- int offt = (int)(ptr - str + 1);
+- kexec_argv[argc] = (int)kbuf + offt;
+- argc++;
+- }
+- ptr = strchr(ptr + 1, ' ');
+- }
+- if (argc > 1) {
+- /* Copy to kernel space */
+- copy_from_user(kbuf, (char *)image->segment[i].buf, image->segment[i].bufsz);
+- fw_arg0 = kexec_args[0] = argc;
+- }
+- break;
++ struct kexec_segment *seg;
++
++ seg = &image->segment[i];
++ if (seg->bufsz < 6)
++ continue;
++
++ if (strncmp((char *) seg->buf, "kexec", 5))
++ continue;
++
++ /* don't copy "kexec" */
++ buf = seg->buf + 5;
++ bufsz = seg->bufsz - 5;
++ break;
++ }
++
++ if (i >= image->nr_segments)
++ return;
++
++ size = KEXEC_MIPS_ARGV_BUF_SIZE - 1;
++ size = min(size, bufsz);
++ if (size < bufsz)
++ pr_warn("kexec command line truncated to %d bytes\n", size);
++
++ /* Copy to kernel space */
++ copy_from_user(kexec_argv_buf, buf, size);
++}
++
++static void
++machine_kexec_parse_argv(struct kimage *image)
++{
++ char *reboot_code_buffer;
++ int reloc_delta;
++ char *ptr;
++ int argc;
++ int i;
++
++ ptr = kexec_argv_buf;
++ argc = 0;
++
++ /*
++ * convert command line string to array of parameters
++ * (as bootloader does).
++ */
++ while (ptr && *ptr && (KEXEC_MIPS_ARGV_MAX_ARGS > argc)) {
++ if (*ptr == ' ') {
++ *ptr++ = '\0';
++ continue;
+ }
++
++ kexec_argv[argc++] = (int) ptr;
++ ptr = strchr(ptr, ' ');
+ }
+
+- pr_info("argc = %lu\n", kexec_args[0]);
+- for (i = 0; i < kexec_args[0]; i++)
+- pr_info("argv[%d] = %p, %s\n", i, (char *)kexec_argv[i], (char *)kexec_argv[i]);
++ if (!argc)
++ return;
+
+- return 0;
++ kexec_args[0] = argc;
++ kexec_args[1] = (int) kexec_argv;
++ kexec_args[2] = 0;
++ kexec_args[3] = 0;
++
++ reboot_code_buffer = page_address(image->control_code_page);
++ reloc_delta = reboot_code_buffer - (char *) &__start___kexec_relocate;
++
++ kexec_args[1] += reloc_delta;
++ for (i = 0; i < argc; i++)
++ kexec_argv[i] += reloc_delta;
+ }
+
+ int
+@@ -95,8 +132,14 @@ machine_kexec_prepare(struct kimage *kim
+ *
+ * This can be overrided by _machine_kexec_prepare().
+ */
+- machine_kexec_init_args();
+- machine_kexec_pass_args(kimage);
++
++ kexec_args[0] = fw_arg0;
++ kexec_args[1] = fw_arg1;
++ kexec_args[2] = fw_arg2;
++ kexec_args[3] = fw_arg3;
++
++ machine_kexec_init_argv(kimage);
++ machine_kexec_parse_argv(kimage);
+
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
+@@ -152,11 +195,13 @@ machine_kexec(struct kimage *image)
+ pr_info("kexec_indirection_page = %p\n",
+ (void *)kexec_indirection_page);
+
++ pr_info("Copy kexec_relocate section from %p to reboot_code_buffer: %p\n",
++ &__start___kexec_relocate, (void *)reboot_code_buffer);
++
+ memcpy((void *)reboot_code_buffer, &__start___kexec_relocate,
+ kexec_relocate_size);
+
+- pr_info("Copy kexec_relocate section from %p to reboot_code_buffer: %p\n",
+- &__start___kexec_relocate, (void *)reboot_code_buffer);
++ machine_kexec_print_args();
+
+ /*
+ * The generic kexec code builds a page list with physical
diff --git a/target/linux/generic/patches-3.3/400-rootfs_split.patch b/target/linux/generic/patches-3.3/400-rootfs_split.patch
new file mode 100644
index 0000000..debe930
--- /dev/null
+++ b/target/linux/generic/patches-3.3/400-rootfs_split.patch
@@ -0,0 +1,327 @@
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -23,6 +23,14 @@ config MTD_TESTS
+ WARNING: some of the tests will ERASE entire MTD device which they
+ test. Do not use these tests unless you really know what you do.
+
++config MTD_ROOTFS_ROOT_DEV
++ bool "Automatically set 'rootfs' partition to be root filesystem"
++ default y
++
++config MTD_ROOTFS_SPLIT
++ bool "Automatically split 'rootfs' partition for squashfs"
++ default y
++
+ config MTD_REDBOOT_PARTS
+ tristate "RedBoot partition table parsing"
+ ---help---
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -29,6 +29,8 @@
+ #include <linux/kmod.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
++#include <linux/root_dev.h>
++#include <linux/magic.h>
+ #include <linux/err.h>
+
+ #include "mtdcore.h"
+@@ -50,7 +52,7 @@ struct mtd_part {
+ * the pointer to that structure with this macro.
+ */
+ #define PART(x) ((struct mtd_part *)(x))
+-
++#define IS_PART(mtd) (mtd->read == part_read)
+
+ /*
+ * MTD methods which simply translate the effective address and pass through
+@@ -643,6 +645,155 @@ int mtd_del_partition(struct mtd_info *m
+ }
+ EXPORT_SYMBOL_GPL(mtd_del_partition);
+
++#ifdef CONFIG_MTD_ROOTFS_SPLIT
++#define ROOTFS_SPLIT_NAME "rootfs_data"
++#define ROOTFS_REMOVED_NAME "<removed>"
++
++struct squashfs_super_block {
++ __le32 s_magic;
++ __le32 pad0[9];
++ __le64 bytes_used;
++};
++
++
++static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
++{
++ struct squashfs_super_block sb;
++ int len, ret;
++
++ ret = master->read(master, offset, sizeof(sb), &len, (void *) &sb);
++ if (ret || (len != sizeof(sb))) {
++ printk(KERN_ALERT "split_squashfs: error occured while reading "
++ "from \"%s\"\n", master->name);
++ return -EINVAL;
++ }
++
++ if (SQUASHFS_MAGIC != le32_to_cpu(sb.s_magic) ) {
++ printk(KERN_ALERT "split_squashfs: no squashfs found in \"%s\"\n",
++ master->name);
++ *split_offset = 0;
++ return 0;
++ }
++
++ if (le64_to_cpu((sb.bytes_used)) <= 0) {
++ printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
++ master->name);
++ *split_offset = 0;
++ return 0;
++ }
++
++ len = (u32) le64_to_cpu(sb.bytes_used);
++ len += (offset & 0x000fffff);
++ len += (master->erasesize - 1);
++ len &= ~(master->erasesize - 1);
++ len -= (offset & 0x000fffff);
++ *split_offset = offset + len;
++
++ return 0;
++}
++
++static int split_rootfs_data(struct mtd_info *master, struct mtd_info *rpart, const struct mtd_partition *part)
++{
++ struct mtd_partition *dpart;
++ struct mtd_part *slave = NULL;
++ struct mtd_part *spart;
++ int ret, split_offset = 0;
++
++ spart = PART(rpart);
++ ret = split_squashfs(master, spart->offset, &split_offset);
++ if (ret)
++ return ret;
++
++ if (split_offset <= 0)
++ return 0;
++
++ dpart = kmalloc(sizeof(*part)+sizeof(ROOTFS_SPLIT_NAME)+1, GFP_KERNEL);
++ if (dpart == NULL) {
++ printk(KERN_INFO "split_squashfs: no memory for partition \"%s\"\n",
++ ROOTFS_SPLIT_NAME);
++ return -ENOMEM;
++ }
++
++ memcpy(dpart, part, sizeof(*part));
++ dpart->name = (unsigned char *)&dpart[1];
++ strcpy(dpart->name, ROOTFS_SPLIT_NAME);
++
++ dpart->size = rpart->size - (split_offset - spart->offset);
++ dpart->offset = split_offset;
++
++ if (dpart == NULL)
++ return 1;
++
++ printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=%llX, len=%llX \n",
++ ROOTFS_SPLIT_NAME, dpart->offset, dpart->size);
++
++ slave = allocate_partition(master, dpart, 0, split_offset);
++ if (IS_ERR(slave))
++ return PTR_ERR(slave);
++ mutex_lock(&mtd_partitions_mutex);
++ list_add(&slave->list, &mtd_partitions);
++ mutex_unlock(&mtd_partitions_mutex);
++
++ add_mtd_device(&slave->mtd);
++
++ rpart->split = &slave->mtd;
++
++ return 0;
++}
++
++static int refresh_rootfs_split(struct mtd_info *mtd)
++{
++ struct mtd_partition tpart;
++ struct mtd_part *part;
++ char *name;
++ //int index = 0;
++ int offset, size;
++ int ret;
++
++ part = PART(mtd);
++
++ /* check for the new squashfs offset first */
++ ret = split_squashfs(part->master, part->offset, &offset);
++ if (ret)
++ return ret;
++
++ if ((offset > 0) && !mtd->split) {
++ printk(KERN_INFO "%s: creating new split partition for \"%s\"\n", __func__, mtd->name);
++ /* if we don't have a rootfs split partition, create a new one */
++ tpart.name = (char *) mtd->name;
++ tpart.size = mtd->size;
++ tpart.offset = part->offset;
++
++ return split_rootfs_data(part->master, &part->mtd, &tpart);
++ } else if ((offset > 0) && mtd->split) {
++ /* update the offsets of the existing partition */
++ size = mtd->size + part->offset - offset;
++
++ part = PART(mtd->split);
++ part->offset = offset;
++ part->mtd.size = size;
++ printk(KERN_INFO "%s: %s partition \"" ROOTFS_SPLIT_NAME "\", offset: 0x%06x (0x%06x)\n",
++ __func__, (!strcmp(part->mtd.name, ROOTFS_SPLIT_NAME) ? "updating" : "creating"),
++ (u32) part->offset, (u32) part->mtd.size);
++ name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
++ strcpy(name, ROOTFS_SPLIT_NAME);
++ part->mtd.name = name;
++ } else if ((offset <= 0) && mtd->split) {
++ printk(KERN_INFO "%s: removing partition \"%s\"\n", __func__, mtd->split->name);
++
++ /* mark existing partition as removed */
++ part = PART(mtd->split);
++ name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
++ strcpy(name, ROOTFS_REMOVED_NAME);
++ part->mtd.name = name;
++ part->offset = 0;
++ part->mtd.size = 0;
++ }
++
++ return 0;
++}
++#endif /* CONFIG_MTD_ROOTFS_SPLIT */
++
+ /*
+ * This function, given a master MTD object and a partition table, creates
+ * and registers slave MTD objects which are bound to the master according to
+@@ -659,6 +810,9 @@ int add_mtd_partitions(struct mtd_info *
+ struct mtd_part *slave;
+ uint64_t cur_offset = 0;
+ int i;
++#ifdef CONFIG_MTD_ROOTFS_SPLIT
++ int ret;
++#endif
+
+ printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+
+@@ -673,12 +827,53 @@ int add_mtd_partitions(struct mtd_info *
+
+ add_mtd_device(&slave->mtd);
+
++ if (!strcmp(parts[i].name, "rootfs")) {
++#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
++ if (ROOT_DEV == 0) {
++ printk(KERN_NOTICE "mtd: partition \"rootfs\" "
++ "set to be root filesystem\n");
++ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index);
++ }
++#endif
++#ifdef CONFIG_MTD_ROOTFS_SPLIT
++ ret = split_rootfs_data(master, &slave->mtd, &parts[i]);
++ /* if (ret == 0)
++ * j++; */
++#endif
++ }
++
+ cur_offset = slave->offset + slave->mtd.size;
+ }
+
+ return 0;
+ }
+
++int mtd_device_refresh(struct mtd_info *mtd)
++{
++ int ret = 0;
++
++ if (IS_PART(mtd)) {
++ struct mtd_part *part;
++ struct mtd_info *master;
++
++ part = PART(mtd);
++ master = part->master;
++ if (master->refresh_device)
++ ret = master->refresh_device(master);
++ }
++
++ if (!ret && mtd->refresh_device)
++ ret = mtd->refresh_device(mtd);
++
++#ifdef CONFIG_MTD_ROOTFS_SPLIT
++ if (!ret && IS_PART(mtd) && !strcmp(mtd->name, "rootfs"))
++ refresh_rootfs_split(mtd);
++#endif
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(mtd_device_refresh);
++
+ static DEFINE_SPINLOCK(part_parser_lock);
+ static LIST_HEAD(part_parsers);
+
+--- a/drivers/mtd/mtdchar.c
++++ b/drivers/mtd/mtdchar.c
+@@ -1005,6 +1005,12 @@ static int mtdchar_ioctl(struct file *fi
+ break;
+ }
+
++ case MTDREFRESH:
++ {
++ ret = mtd_device_refresh(mtd);
++ break;
++ }
++
+ default:
+ ret = -ENOTTY;
+ }
+--- a/include/linux/mtd/mtd.h
++++ b/include/linux/mtd/mtd.h
+@@ -114,6 +114,7 @@ struct nand_ecclayout {
+
+ struct module; /* only needed for owner field in mtd_info */
+
++struct mtd_info;
+ struct mtd_info {
+ u_char type;
+ uint32_t flags;
+@@ -214,6 +215,9 @@ struct mtd_info {
+ int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
+ int (*suspend) (struct mtd_info *mtd);
+ void (*resume) (struct mtd_info *mtd);
++ int (*refresh_device)(struct mtd_info *mtd);
++ struct mtd_info *split;
++
+ /*
+ * If the driver is something smart, like UBI, it may need to maintain
+ * its own reference counting. The below functions are only for driver.
+@@ -504,6 +508,7 @@ extern int mtd_device_parse_register(str
+ int defnr_parts);
+ #define mtd_device_register(master, parts, nr_parts) \
+ mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
++extern int mtd_device_refresh(struct mtd_info *master);
+ extern int mtd_device_unregister(struct mtd_info *master);
+ extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
+ extern int __get_mtd_device(struct mtd_info *mtd);
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -36,12 +36,14 @@
+ * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
+ */
+
++struct mtd_partition;
+ struct mtd_partition {
+ char *name; /* identifier string */
+ uint64_t size; /* partition size */
+ uint64_t offset; /* offset within the master MTD space */
+ uint32_t mask_flags; /* master MTD flags to mask out for this partition */
+ struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */
++ int (*refresh_partition)(struct mtd_info *);
+ };
+
+ #define MTDPART_OFS_RETAIN (-3)
+--- a/include/mtd/mtd-abi.h
++++ b/include/mtd/mtd-abi.h
+@@ -202,6 +202,7 @@ struct otp_info {
+ * without OOB, e.g., NOR flash.
+ */
+ #define MEMWRITE _IOWR('M', 24, struct mtd_write_req)
++#define MTDREFRESH _IO('M', 50)
+
+ /*
+ * Obsolete legacy interface. Keep it in order not to break userspace
diff --git a/target/linux/generic/patches-3.3/401-partial_eraseblock_write.patch b/target/linux/generic/patches-3.3/401-partial_eraseblock_write.patch
new file mode 100644
index 0000000..3b22cfc
--- /dev/null
+++ b/target/linux/generic/patches-3.3/401-partial_eraseblock_write.patch
@@ -0,0 +1,145 @@
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -35,6 +35,8 @@
+
+ #include "mtdcore.h"
+
++#define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */
++
+ /* Our partition linked list */
+ static LIST_HEAD(mtd_partitions);
+ static DEFINE_MUTEX(mtd_partitions_mutex);
+@@ -252,13 +254,60 @@ static int part_erase(struct mtd_info *m
+ return -EROFS;
+ if (instr->addr >= mtd->size)
+ return -EINVAL;
++
++ instr->partial_start = false;
++ if (mtd->flags & MTD_ERASE_PARTIAL) {
++ size_t readlen = 0;
++ u64 mtd_ofs;
++
++ instr->erase_buf = kmalloc(part->master->erasesize, GFP_ATOMIC);
++ if (!instr->erase_buf)
++ return -ENOMEM;
++
++ mtd_ofs = part->offset + instr->addr;
++ instr->erase_buf_ofs = do_div(mtd_ofs, part->master->erasesize);
++
++ if (instr->erase_buf_ofs > 0) {
++ instr->addr -= instr->erase_buf_ofs;
++ ret = mtd_read(part->master,
++ instr->addr + part->offset,
++ part->master->erasesize,
++ &readlen, instr->erase_buf);
++
++ instr->partial_start = true;
++ } else {
++ mtd_ofs = part->offset + part->mtd.size;
++ instr->erase_buf_ofs = part->master->erasesize -
++ do_div(mtd_ofs, part->master->erasesize);
++
++ if (instr->erase_buf_ofs > 0) {
++ instr->len += instr->erase_buf_ofs;
++ ret = mtd_read(part->master,
++ part->offset + instr->addr +
++ instr->len - part->master->erasesize,
++ part->master->erasesize, &readlen,
++ instr->erase_buf);
++ } else {
++ ret = 0;
++ }
++ }
++ if (ret < 0) {
++ kfree(instr->erase_buf);
++ return ret;
++ }
++
++ }
++
+ instr->addr += part->offset;
+ ret = mtd_erase(part->master, instr);
+ if (ret) {
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= part->offset;
+ instr->addr -= part->offset;
++ if (mtd->flags & MTD_ERASE_PARTIAL)
++ kfree(instr->erase_buf);
+ }
++
+ return ret;
+ }
+
+@@ -266,7 +315,25 @@ void mtd_erase_callback(struct erase_inf
+ {
+ if (instr->mtd->erase == part_erase) {
+ struct mtd_part *part = PART(instr->mtd);
++ size_t wrlen = 0;
+
++ if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
++ if (instr->partial_start) {
++ part->master->write(part->master,
++ instr->addr, instr->erase_buf_ofs,
++ &wrlen, instr->erase_buf);
++ instr->addr += instr->erase_buf_ofs;
++ } else {
++ instr->len -= instr->erase_buf_ofs;
++ part->master->write(part->master,
++ instr->addr + instr->len,
++ instr->erase_buf_ofs, &wrlen,
++ instr->erase_buf +
++ part->master->erasesize -
++ instr->erase_buf_ofs);
++ }
++ kfree(instr->erase_buf);
++ }
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= part->offset;
+ instr->addr -= part->offset;
+@@ -537,18 +604,24 @@ static struct mtd_part *allocate_partiti
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ mtd_mod_by_eb(slave->offset, &slave->mtd)) {
+ /* Doesn't start on a boundary of major erase size */
+- /* FIXME: Let it be writable if it is on a boundary of
+- * _minor_ erase size though */
+- slave->mtd.flags &= ~MTD_WRITEABLE;
+- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+- part->name);
++ slave->mtd.flags |= MTD_ERASE_PARTIAL;
++ if (((u32) slave->mtd.size) > master->erasesize)
++ slave->mtd.flags &= ~MTD_WRITEABLE;
++ else
++ slave->mtd.erasesize = slave->mtd.size;
+ }
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+- mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
+- slave->mtd.flags &= ~MTD_WRITEABLE;
+- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+- part->name);
++ mtd_mod_by_eb(slave->offset + slave->mtd.size, &slave->mtd)) {
++ slave->mtd.flags |= MTD_ERASE_PARTIAL;
++
++ if ((u32) slave->mtd.size > master->erasesize)
++ slave->mtd.flags &= ~MTD_WRITEABLE;
++ else
++ slave->mtd.erasesize = slave->mtd.size;
+ }
++ if ((slave->mtd.flags & (MTD_ERASE_PARTIAL|MTD_WRITEABLE)) == MTD_ERASE_PARTIAL)
++ printk(KERN_WARNING"mtd: partition \"%s\" must either start or end on erase block boundary or be smaller than an erase block -- forcing read-only\n",
++ part->name);
+
+ slave->mtd.ecclayout = master->ecclayout;
+ if (master->block_isbad) {
+--- a/include/linux/mtd/mtd.h
++++ b/include/linux/mtd/mtd.h
+@@ -58,6 +58,10 @@ struct erase_info {
+ u_long priv;
+ u_char state;
+ struct erase_info *next;
++
++ u8 *erase_buf;
++ u32 erase_buf_ofs;
++ bool partial_start;
+ };
+
+ struct mtd_erase_region_info {
diff --git a/target/linux/generic/patches-3.3/410-mtd_info_move_forward_decl.patch b/target/linux/generic/patches-3.3/410-mtd_info_move_forward_decl.patch
new file mode 100644
index 0000000..251f522
--- /dev/null
+++ b/target/linux/generic/patches-3.3/410-mtd_info_move_forward_decl.patch
@@ -0,0 +1,18 @@
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -35,6 +35,7 @@
+ * Note: writeable partitions require their size and offset be
+ * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
+ */
++struct mtd_info;
+
+ struct mtd_partition;
+ struct mtd_partition {
+@@ -52,7 +53,6 @@ struct mtd_partition {
+ #define MTDPART_SIZ_FULL (0)
+
+
+-struct mtd_info;
+ struct device_node;
+
+ /**
diff --git a/target/linux/generic/patches-3.3/420-redboot_space.patch b/target/linux/generic/patches-3.3/420-redboot_space.patch
new file mode 100644
index 0000000..fb6700b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/420-redboot_space.patch
@@ -0,0 +1,30 @@
+--- a/drivers/mtd/redboot.c
++++ b/drivers/mtd/redboot.c
+@@ -267,14 +267,21 @@ static int parse_redboot_partitions(stru
+ #endif
+ names += strlen(names)+1;
+
+-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
+- i++;
+- parts[i].offset = parts[i-1].size + parts[i-1].offset;
+- parts[i].size = fl->next->img->flash_base - parts[i].offset;
+- parts[i].name = nullname;
+- }
++ if (!strcmp(parts[i].name, "rootfs")) {
++ parts[i].size = fl->next->img->flash_base;
++ parts[i].size &= ~(master->erasesize - 1);
++ parts[i].size -= parts[i].offset;
++#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
++ nrparts--;
++ } else {
++ i++;
++ parts[i].offset = parts[i-1].size + parts[i-1].offset;
++ parts[i].size = fl->next->img->flash_base - parts[i].offset;
++ parts[i].name = nullname;
+ #endif
++ }
++ }
+ tmp_fl = fl;
+ fl = fl->next;
+ kfree(tmp_fl);
diff --git a/target/linux/generic/patches-3.3/421-redboot_boardconfig.patch b/target/linux/generic/patches-3.3/421-redboot_boardconfig.patch
new file mode 100644
index 0000000..db8377b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/421-redboot_boardconfig.patch
@@ -0,0 +1,60 @@
+--- a/drivers/mtd/redboot.c
++++ b/drivers/mtd/redboot.c
+@@ -30,6 +30,8 @@
+ #include <linux/mtd/partitions.h>
+ #include <linux/module.h>
+
++#define BOARD_CONFIG_PART "boardconfig"
++
+ struct fis_image_desc {
+ unsigned char name[16]; // Null terminated name
+ uint32_t flash_base; // Address within FLASH of image
+@@ -60,6 +62,7 @@ static int parse_redboot_partitions(stru
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+ {
++ unsigned long max_offset = 0;
+ int nrparts = 0;
+ struct fis_image_desc *buf;
+ struct mtd_partition *parts;
+@@ -227,14 +230,14 @@ static int parse_redboot_partitions(stru
+ }
+ }
+ #endif
+- parts = kzalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL);
++ parts = kzalloc(sizeof(*parts) * (nrparts + 1) + nulllen + namelen + sizeof(BOARD_CONFIG_PART), GFP_KERNEL);
+
+ if (!parts) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+- nullname = (char *)&parts[nrparts];
++ nullname = (char *)&parts[nrparts + 1];
+ #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if (nulllen > 0) {
+ strcpy(nullname, nullstring);
+@@ -253,6 +256,8 @@ static int parse_redboot_partitions(stru
+ }
+ #endif
+ for ( ; i<nrparts; i++) {
++ if(max_offset < buf[i].flash_base + buf[i].size)
++ max_offset = buf[i].flash_base + buf[i].size;
+ parts[i].size = fl->img->size;
+ parts[i].offset = fl->img->flash_base;
+ parts[i].name = names;
+@@ -286,6 +291,14 @@ static int parse_redboot_partitions(stru
+ fl = fl->next;
+ kfree(tmp_fl);
+ }
++ if(master->size - max_offset >= master->erasesize)
++ {
++ parts[nrparts].size = master->size - max_offset;
++ parts[nrparts].offset = max_offset;
++ parts[nrparts].name = names;
++ strcpy(names, BOARD_CONFIG_PART);
++ nrparts++;
++ }
+ ret = nrparts;
+ *pparts = parts;
+ out:
diff --git a/target/linux/generic/patches-3.3/430-mtd_myloader_partition_parser.patch b/target/linux/generic/patches-3.3/430-mtd_myloader_partition_parser.patch
new file mode 100644
index 0000000..d60126d
--- /dev/null
+++ b/target/linux/generic/patches-3.3/430-mtd_myloader_partition_parser.patch
@@ -0,0 +1,35 @@
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -156,6 +156,22 @@ config MTD_BCM63XX_PARTS
+ This provides partions parsing for BCM63xx devices with CFE
+ bootloaders.
+
++config MTD_MYLOADER_PARTS
++ tristate "MyLoader partition parsing"
++ depends on ADM5120 || ATHEROS_AR231X || ATHEROS_AR71XX || ATH79
++ ---help---
++ MyLoader is a bootloader which allows the user to define partitions
++ in flash devices, by putting a table in the second erase block
++ on the device, similar to a partition table. This table gives the
++ offsets and lengths of the user defined partitions.
++
++ If you need code which can detect and parse these tables, and
++ register MTD 'partitions' corresponding to each image detected,
++ enable this option.
++
++ You will still need the parsing functions to be called by the driver
++ for your particular device. It won't happen automatically.
++
+ comment "User Modules And Translation Layers"
+
+ config MTD_CHAR
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -12,6 +12,7 @@ obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdli
+ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+ obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
+ obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
++obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o
+
+ # 'Users' - code which presents functionality to userspace.
+ obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/target/linux/generic/patches-3.3/440-block2mtd_init.patch b/target/linux/generic/patches-3.3/440-block2mtd_init.patch
new file mode 100644
index 0000000..8ca6605
--- /dev/null
+++ b/target/linux/generic/patches-3.3/440-block2mtd_init.patch
@@ -0,0 +1,116 @@
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -14,6 +14,7 @@
+ #include <linux/list.h>
+ #include <linux/init.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
+ #include <linux/mutex.h>
+ #include <linux/mount.h>
+ #include <linux/slab.h>
+@@ -231,11 +232,12 @@ static void block2mtd_free_device(struct
+
+
+ /* FIXME: ensure that mtd->size % erase_size == 0 */
+-static struct block2mtd_dev *add_device(char *devname, int erase_size)
++static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname)
+ {
+ const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
+ struct block_device *bdev;
+ struct block2mtd_dev *dev;
++ struct mtd_partition *part;
+ char *name;
+
+ if (!devname)
+@@ -274,13 +276,16 @@ static struct block2mtd_dev *add_device(
+
+ /* Setup the MTD structure */
+ /* make the name contain the block device in */
+- name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
++ if (!mtdname)
++ mtdname = devname;
++ name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
+ if (!name)
+ goto devinit_err;
+
++ strcpy(name, mtdname);
+ dev->mtd.name = name;
+
+- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
++ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
+ dev->mtd.erasesize = erase_size;
+ dev->mtd.writesize = 1;
+ dev->mtd.type = MTD_RAM;
+@@ -293,14 +298,17 @@ static struct block2mtd_dev *add_device(
+ dev->mtd.priv = dev;
+ dev->mtd.owner = THIS_MODULE;
+
+- if (mtd_device_register(&dev->mtd, NULL, 0)) {
++ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
++ part->name = name;
++ part->offset = 0;
++ part->size = dev->mtd.size;
++ if (mtd_device_register(&dev->mtd, part, 1)) {
+ /* Device didn't get added, so free the entry */
+ goto devinit_err;
+ }
+ list_add(&dev->list, &blkmtd_device_list);
+ INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
+- dev->mtd.name + strlen("block2mtd: "),
+- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
++ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ return dev;
+
+ devinit_err:
+@@ -373,9 +381,9 @@ static char block2mtd_paramline[80 + 12]
+
+ static int block2mtd_setup2(const char *val)
+ {
+- char buf[80 + 12]; /* 80 for device, 12 for erase size */
++ char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
+ char *str = buf;
+- char *token[2];
++ char *token[3];
+ char *name;
+ size_t erase_size = PAGE_SIZE;
+ int i, ret;
+@@ -386,7 +394,7 @@ static int block2mtd_setup2(const char *
+ strcpy(str, val);
+ kill_final_newline(str);
+
+- for (i = 0; i < 2; i++)
++ for (i = 0; i < 3; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str)
+@@ -405,8 +413,10 @@ static int block2mtd_setup2(const char *
+ parse_err("illegal erase size");
+ }
+ }
++ if (token[2] && (strlen(token[2]) + 1 > 80))
++ parse_err("mtd device name too long");
+
+- add_device(name, erase_size);
++ add_device(name, erase_size, token[2]);
+
+ return 0;
+ }
+@@ -440,7 +450,7 @@ static int block2mtd_setup(const char *v
+
+
+ module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
+-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
++MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
+
+ static int __init block2mtd_init(void)
+ {
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -514,6 +514,7 @@ rescan:
+ kfree(state);
+ return 0;
+ }
++EXPORT_SYMBOL(rescan_partitions);
+
+ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+ {
diff --git a/target/linux/generic/patches-3.3/441-block2mtd_refresh.patch b/target/linux/generic/patches-3.3/441-block2mtd_refresh.patch
new file mode 100644
index 0000000..48b0a46
--- /dev/null
+++ b/target/linux/generic/patches-3.3/441-block2mtd_refresh.patch
@@ -0,0 +1,291 @@
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -29,6 +29,8 @@ struct block2mtd_dev {
+ struct block_device *blkdev;
+ struct mtd_info mtd;
+ struct mutex write_mutex;
++ rwlock_t bdev_mutex;
++ char devname[0];
+ };
+
+
+@@ -81,6 +83,12 @@ static int block2mtd_erase(struct mtd_in
+ size_t len = instr->len;
+ int err;
+
++ read_lock(&dev->bdev_mutex);
++ if (!dev->blkdev) {
++ err = -EINVAL;
++ goto done;
++ }
++
+ instr->state = MTD_ERASING;
+ mutex_lock(&dev->write_mutex);
+ err = _block2mtd_erase(dev, from, len);
+@@ -92,6 +100,10 @@ static int block2mtd_erase(struct mtd_in
+ instr->state = MTD_ERASE_DONE;
+
+ mtd_erase_callback(instr);
++
++done:
++ read_unlock(&dev->bdev_mutex);
++
+ return err;
+ }
+
+@@ -103,10 +115,14 @@ static int block2mtd_read(struct mtd_inf
+ struct page *page;
+ int index = from >> PAGE_SHIFT;
+ int offset = from & (PAGE_SIZE-1);
+- int cpylen;
++ int cpylen, err = 0;
++
++ read_lock(&dev->bdev_mutex);
++ if (!dev->blkdev || (from > mtd->size)) {
++ err = -EINVAL;
++ goto done;
++ }
+
+- if (from > mtd->size)
+- return -EINVAL;
+ if (from + len > mtd->size)
+ len = mtd->size - from;
+
+@@ -121,10 +137,14 @@ static int block2mtd_read(struct mtd_inf
+ len = len - cpylen;
+
+ page = page_read(dev->blkdev->bd_inode->i_mapping, index);
+- if (!page)
+- return -ENOMEM;
+- if (IS_ERR(page))
+- return PTR_ERR(page);
++ if (!page) {
++ err = -ENOMEM;
++ goto done;
++ }
++ if (IS_ERR(page)) {
++ err = PTR_ERR(page);
++ goto done;
++ }
+
+ memcpy(buf, page_address(page) + offset, cpylen);
+ page_cache_release(page);
+@@ -135,7 +155,10 @@ static int block2mtd_read(struct mtd_inf
+ offset = 0;
+ index++;
+ }
+- return 0;
++
++done:
++ read_unlock(&dev->bdev_mutex);
++ return err;
+ }
+
+
+@@ -187,12 +210,22 @@ static int block2mtd_write(struct mtd_in
+ size_t *retlen, const u_char *buf)
+ {
+ struct block2mtd_dev *dev = mtd->priv;
+- int err;
++ int err = 0;
++
++ read_lock(&dev->bdev_mutex);
++ if (!dev->blkdev) {
++ err = -EINVAL;
++ goto done;
++ }
+
+ if (!len)
+- return 0;
+- if (to >= mtd->size)
+- return -ENOSPC;
++ goto done;
++
++ if (to >= mtd->size) {
++ err = -ENOSPC;
++ goto done;
++ }
++
+ if (to + len > mtd->size)
+ len = mtd->size - to;
+
+@@ -201,6 +234,9 @@ static int block2mtd_write(struct mtd_in
+ mutex_unlock(&dev->write_mutex);
+ if (err > 0)
+ err = 0;
++
++done:
++ read_unlock(&dev->bdev_mutex);
+ return err;
+ }
+
+@@ -209,33 +245,110 @@ static int block2mtd_write(struct mtd_in
+ static void block2mtd_sync(struct mtd_info *mtd)
+ {
+ struct block2mtd_dev *dev = mtd->priv;
++ read_lock(&dev->bdev_mutex);
++ if (dev->blkdev)
+ sync_blockdev(dev->blkdev);
++ read_unlock(&dev->bdev_mutex);
++
+ return;
+ }
+
+
++static int _open_bdev(struct block2mtd_dev *dev)
++{
++ const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
++ struct block_device *bdev;
++
++ /* Get a handle on the device */
++ bdev = blkdev_get_by_path(dev->devname, mode, dev);
++#ifndef MODULE
++ if (IS_ERR(bdev)) {
++ dev_t devt;
++
++ /* We might not have rootfs mounted at this point. Try
++ to resolve the device name by other means. */
++
++ devt = name_to_dev_t(dev->devname);
++ if (devt)
++ bdev = blkdev_get_by_dev(devt, mode, dev);
++ }
++#endif
++
++ if (IS_ERR(bdev)) {
++ ERROR("error: cannot open device %s", dev->devname);
++ return 1;
++ }
++ dev->blkdev = bdev;
++
++ if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
++ ERROR("attempting to use an MTD device as a block device");
++ return 1;
++ }
++
++ return 0;
++}
++
++static void _close_bdev(struct block2mtd_dev *dev)
++{
++ struct block_device *bdev;
++
++ if (!dev->blkdev)
++ return;
++
++ bdev = dev->blkdev;
++ invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1);
++ blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
++ dev->blkdev = NULL;
++}
++
+ static void block2mtd_free_device(struct block2mtd_dev *dev)
+ {
+ if (!dev)
+ return;
+
+ kfree(dev->mtd.name);
+-
+- if (dev->blkdev) {
+- invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
+- 0, -1);
+- blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+- }
+-
++ _close_bdev(dev);
+ kfree(dev);
+ }
+
+
+-/* FIXME: ensure that mtd->size % erase_size == 0 */
+-static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname)
++static int block2mtd_refresh(struct mtd_info *mtd)
+ {
+- const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
++ struct block2mtd_dev *dev = mtd->priv;
+ struct block_device *bdev;
++ dev_t devt;
++ int err = 0;
++
++ /* no other mtd function can run at this point */
++ write_lock(&dev->bdev_mutex);
++
++ /* get the device number for the whole disk */
++ devt = MKDEV(MAJOR(dev->blkdev->bd_dev), 0);
++
++ /* close the old block device */
++ _close_bdev(dev);
++
++ /* open the whole disk, issue a partition rescan, then */
++ bdev = blkdev_get_by_dev(devt, FMODE_WRITE | FMODE_READ, mtd);
++ if (!bdev || !bdev->bd_disk)
++ err = -EINVAL;
++#ifndef CONFIG_MTD_BLOCK2MTD_MODULE
++ else
++ err = rescan_partitions(bdev->bd_disk, bdev);
++#endif
++ if (bdev)
++ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
++
++ /* try to open the partition block device again */
++ _open_bdev(dev);
++ write_unlock(&dev->bdev_mutex);
++
++ return err;
++}
++
++/* FIXME: ensure that mtd->size % erase_size == 0 */
++static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
++{
+ struct block2mtd_dev *dev;
+ struct mtd_partition *part;
+ char *name;
+@@ -243,36 +356,17 @@ static struct block2mtd_dev *add_device(
+ if (!devname)
+ return NULL;
+
+- dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
++ dev = kzalloc(sizeof(struct block2mtd_dev) + strlen(devname) + 1, GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+- /* Get a handle on the device */
+- bdev = blkdev_get_by_path(devname, mode, dev);
+-#ifndef MODULE
+- if (IS_ERR(bdev)) {
+-
+- /* We might not have rootfs mounted at this point. Try
+- to resolve the device name by other means. */
++ strcpy(dev->devname, devname);
+
+- dev_t devt = name_to_dev_t(devname);
+- if (devt)
+- bdev = blkdev_get_by_dev(devt, mode, dev);
+- }
+-#endif
+-
+- if (IS_ERR(bdev)) {
+- ERROR("error: cannot open device %s", devname);
++ if (_open_bdev(dev))
+ goto devinit_err;
+- }
+- dev->blkdev = bdev;
+-
+- if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
+- ERROR("attempting to use an MTD device as a block device");
+- goto devinit_err;
+- }
+
+ mutex_init(&dev->write_mutex);
++ rwlock_init(&dev->bdev_mutex);
+
+ /* Setup the MTD structure */
+ /* make the name contain the block device in */
+@@ -297,6 +391,7 @@ static struct block2mtd_dev *add_device(
+ dev->mtd.read = block2mtd_read;
+ dev->mtd.priv = dev;
+ dev->mtd.owner = THIS_MODULE;
++ dev->mtd.refresh_device = block2mtd_refresh;
+
+ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
+ part->name = name;
diff --git a/target/linux/generic/patches-3.3/442-block2mtd_probe.patch b/target/linux/generic/patches-3.3/442-block2mtd_probe.patch
new file mode 100644
index 0000000..c427e9f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/442-block2mtd_probe.patch
@@ -0,0 +1,10 @@
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -268,6 +268,7 @@ static int _open_bdev(struct block2mtd_d
+ /* We might not have rootfs mounted at this point. Try
+ to resolve the device name by other means. */
+
++ wait_for_device_probe();
+ devt = name_to_dev_t(dev->devname);
+ if (devt)
+ bdev = blkdev_get_by_dev(devt, mode, dev);
diff --git a/target/linux/generic/patches-3.3/450-mtd_plat_nand_chip_fixup.patch b/target/linux/generic/patches-3.3/450-mtd_plat_nand_chip_fixup.patch
new file mode 100644
index 0000000..725877a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/450-mtd_plat_nand_chip_fixup.patch
@@ -0,0 +1,37 @@
+---
+ drivers/mtd/nand/plat_nand.c | 13 ++++++++++++-
+ include/linux/mtd/nand.h | 1 +
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/include/linux/mtd/nand.h
++++ b/include/linux/mtd/nand.h
+@@ -622,6 +622,7 @@ struct platform_nand_chip {
+ unsigned int options;
+ unsigned int bbt_options;
+ const char **part_probe_types;
++ int (*chip_fixup)(struct mtd_info *mtd);
+ };
+
+ /* Keep gcc happy */
+--- a/drivers/mtd/nand/plat_nand.c
++++ b/drivers/mtd/nand/plat_nand.c
+@@ -93,7 +93,18 @@ static int __devinit plat_nand_probe(str
+ }
+
+ /* Scan to find existence of the device */
+- if (nand_scan(&data->mtd, pdata->chip.nr_chips)) {
++ if (nand_scan_ident(&data->mtd, pdata->chip.nr_chips, NULL)) {
++ res = -ENXIO;
++ goto out;
++ }
++
++ if (pdata->chip.chip_fixup) {
++ res = pdata->chip.chip_fixup(&data->mtd);
++ if (res)
++ goto out;
++ }
++
++ if (nand_scan_tail(&data->mtd)) {
+ err = -ENXIO;
+ goto out;
+ }
diff --git a/target/linux/generic/patches-3.3/451-mtd_fix_nand_correct_data_return_code.patch b/target/linux/generic/patches-3.3/451-mtd_fix_nand_correct_data_return_code.patch
new file mode 100644
index 0000000..2f72d85
--- /dev/null
+++ b/target/linux/generic/patches-3.3/451-mtd_fix_nand_correct_data_return_code.patch
@@ -0,0 +1,12 @@
+--- a/drivers/mtd/nand/nand_ecc.c
++++ b/drivers/mtd/nand/nand_ecc.c
+@@ -507,8 +507,7 @@ int __nand_correct_data(unsigned char *b
+ if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
+ return 1; /* error in ECC data; no action needed */
+
+- printk(KERN_ERR "uncorrectable error : ");
+- return -1;
++ return -EBADMSG;
+ }
+ EXPORT_SYMBOL(__nand_correct_data);
+
diff --git a/target/linux/generic/patches-3.3/460-cfi_cmdset_0002_no_erase_suspend.patch b/target/linux/generic/patches-3.3/460-cfi_cmdset_0002_no_erase_suspend.patch
new file mode 100644
index 0000000..0c4b9be
--- /dev/null
+++ b/target/linux/generic/patches-3.3/460-cfi_cmdset_0002_no_erase_suspend.patch
@@ -0,0 +1,11 @@
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -682,7 +682,7 @@ static int get_chip(struct map_info *map
+ return 0;
+
+ case FL_ERASING:
+- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
++ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ !(mode == FL_READY || mode == FL_POINT ||
+ (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
+ goto sleep;
diff --git a/target/linux/generic/patches-3.3/470-mtd_m25p80_add_pm25lv_flash_support.patch b/target/linux/generic/patches-3.3/470-mtd_m25p80_add_pm25lv_flash_support.patch
new file mode 100644
index 0000000..ff27d04
--- /dev/null
+++ b/target/linux/generic/patches-3.3/470-mtd_m25p80_add_pm25lv_flash_support.patch
@@ -0,0 +1,39 @@
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -45,6 +45,7 @@
+ #define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
+ #define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
+ #define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
++#define OPCODE_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips*/
+ #define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
+ #define OPCODE_RDID 0x9f /* Read JEDEC ID */
+
+@@ -625,6 +626,7 @@ struct flash_info {
+ u16 flags;
+ #define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
+ #define M25P_NO_ERASE 0x02 /* No erase command needed */
++#define SECT_4K_PMC 0x04 /* OPCODE_BE_4K_PMC works uniformly */
+ };
+
+ #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+@@ -686,6 +688,10 @@ static const struct spi_device_id m25p_i
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+
++ /* PMC -- pm25x "blocks" are 32K, sectors are 4K */
++ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
++ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
++
+ /* Spansion -- single (large) sector size only, at least
+ * for the chips listed here (without boot sectors).
+ */
+@@ -921,6 +927,9 @@ static int __devinit m25p_probe(struct s
+ if (info->flags & SECT_4K) {
+ flash->erase_opcode = OPCODE_BE_4K;
+ flash->mtd.erasesize = 4096;
++ } else if (info->flags & SECT_4K_PMC) {
++ flash->erase_opcode = OPCODE_BE_4K_PMC;
++ flash->mtd.erasesize = 4096;
+ } else {
+ flash->erase_opcode = OPCODE_SE;
+ flash->mtd.erasesize = info->sector_size;
diff --git a/target/linux/generic/patches-3.3/473-mtd_m25p80_add_w25q128.patch b/target/linux/generic/patches-3.3/473-mtd_m25p80_add_w25q128.patch
new file mode 100644
index 0000000..b00c9fa
--- /dev/null
+++ b/target/linux/generic/patches-3.3/473-mtd_m25p80_add_w25q128.patch
@@ -0,0 +1,10 @@
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -765,6 +765,7 @@ static const struct spi_device_id m25p_i
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
++ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+
+ /* Catalyst / On Semiconductor -- non-JEDEC */
+ { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
diff --git a/target/linux/generic/patches-3.3/475-mtd_cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch b/target/linux/generic/patches-3.3/475-mtd_cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch
new file mode 100644
index 0000000..3b43535
--- /dev/null
+++ b/target/linux/generic/patches-3.3/475-mtd_cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch
@@ -0,0 +1,18 @@
+From: George Kashperko <george@znau.edu.ua>
+
+Issue map read after Write Buffer Load command to ensure chip is ready
+to receive data.
+Signed-off-by: George Kashperko <george@znau.edu.ua>
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 1 +
+ 1 file changed, 1 insertion(+)
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -1409,6 +1409,7 @@ static int __xipram do_write_buffer(stru
+
+ /* Write Buffer Load */
+ map_write(map, CMD(0x25), cmd_adr);
++ (void) map_read(map, cmd_adr);
+
+ chip->state = FL_WRITING_TO_BUFFER;
+
diff --git a/target/linux/generic/patches-3.3/476-mtd-m25p80-allow-to-disable-small-sector-erase.patch b/target/linux/generic/patches-3.3/476-mtd-m25p80-allow-to-disable-small-sector-erase.patch
new file mode 100644
index 0000000..231d454
--- /dev/null
+++ b/target/linux/generic/patches-3.3/476-mtd-m25p80-allow-to-disable-small-sector-erase.patch
@@ -0,0 +1,41 @@
+--- a/drivers/mtd/devices/Kconfig
++++ b/drivers/mtd/devices/Kconfig
+@@ -102,6 +102,14 @@ config M25PXX_USE_FAST_READ
+ help
+ This option enables FAST_READ access supported by ST M25Pxx.
+
++config M25PXX_PREFER_SMALL_SECTOR_ERASE
++ bool "Prefer small sector erase"
++ depends on MTD_M25P80
++ default y
++ help
++ This option enables use of the small erase sectors if that is
++ supported by the flash chip.
++
+ config MTD_SST25L
+ tristate "Support SST25L (non JEDEC) SPI Flash chips"
+ depends on SPI_MASTER
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -84,6 +84,12 @@
+
+ #define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+
++#ifdef CONFIG_M25PXX_PREFER_SMALL_SECTOR_ERASE
++#define PREFER_SMALL_SECTOR_ERASE 1
++#else
++#define PREFER_SMALL_SECTOR_ERASE 0
++#endif
++
+ /****************************************************************************/
+
+ struct m25p {
+@@ -925,7 +931,7 @@ static int __devinit m25p_probe(struct s
+ flash->mtd.write = m25p80_write;
+
+ /* prefer "small sector" erase if possible */
+- if (info->flags & SECT_4K) {
++ if (PREFER_SMALL_SECTOR_ERASE && (info->flags & SECT_4K)) {
+ flash->erase_opcode = OPCODE_BE_4K;
+ flash->mtd.erasesize = 4096;
+ } else if (info->flags & SECT_4K_PMC) {
diff --git a/target/linux/generic/patches-3.3/500-yaffs_support.patch b/target/linux/generic/patches-3.3/500-yaffs_support.patch
new file mode 100644
index 0000000..7776dcf
--- /dev/null
+++ b/target/linux/generic/patches-3.3/500-yaffs_support.patch
@@ -0,0 +1,18 @@
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -35,6 +35,7 @@ source "fs/gfs2/Kconfig"
+ source "fs/ocfs2/Kconfig"
+ source "fs/btrfs/Kconfig"
+ source "fs/nilfs2/Kconfig"
++source "fs/yaffs2/Kconfig"
+
+ endif # BLOCK
+
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -125,3 +125,5 @@ obj-$(CONFIG_GFS2_FS) += gfs2/
+ obj-y += exofs/ # Multiple modules
+ obj-$(CONFIG_CEPH_FS) += ceph/
+ obj-$(CONFIG_PSTORE) += pstore/
++obj-$(CONFIG_YAFFS_FS) += yaffs2/
++
diff --git a/target/linux/generic/patches-3.3/501-yaffs_cvs_2009_04_24.patch b/target/linux/generic/patches-3.3/501-yaffs_cvs_2009_04_24.patch
new file mode 100644
index 0000000..c334b17
--- /dev/null
+++ b/target/linux/generic/patches-3.3/501-yaffs_cvs_2009_04_24.patch
@@ -0,0 +1,12344 @@
+--- a/fs/yaffs2/devextras.h
++++ b/fs/yaffs2/devextras.h
+@@ -14,194 +14,135 @@
+ */
+
+ /*
+- * This file is just holds extra declarations used during development.
+- * Most of these are from kernel includes placed here so we can use them in
+- * applications.
++ * This file is just holds extra declarations of macros that would normally
++ * be providesd in the Linux kernel. These macros have been written from
++ * scratch but are functionally equivalent to the Linux ones.
+ *
+ */
+
+ #ifndef __EXTRAS_H__
+ #define __EXTRAS_H__
+
+-#if defined WIN32
+-#define __inline__ __inline
+-#define new newHack
+-#endif
+-
+-#if !(defined __KERNEL__) || (defined WIN32)
+
+-/* User space defines */
++#if !(defined __KERNEL__)
+
++/* Definition of types */
+ typedef unsigned char __u8;
+ typedef unsigned short __u16;
+ typedef unsigned __u32;
+
++#endif
++
+ /*
+- * Simple doubly linked list implementation.
+- *
+- * Some of the internal functions ("__xxx") are useful when
+- * manipulating whole lists rather than single entries, as
+- * sometimes we already know the next/prev entries and we can
+- * generate better code by using them directly rather than
+- * using the generic single-entry routines.
++ * This is a simple doubly linked list implementation that matches the
++ * way the Linux kernel doubly linked list implementation works.
+ */
+
+-#define prefetch(x) 1
+-
+-struct list_head {
+- struct list_head *next, *prev;
++struct ylist_head {
++ struct ylist_head *next; /* next in chain */
++ struct ylist_head *prev; /* previous in chain */
+ };
+
+-#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+-#define LIST_HEAD(name) \
+- struct list_head name = LIST_HEAD_INIT(name)
++/* Initialise a static list */
++#define YLIST_HEAD(name) \
++struct ylist_head name = { &(name), &(name)}
++
+
+-#define INIT_LIST_HEAD(ptr) do { \
+- (ptr)->next = (ptr); (ptr)->prev = (ptr); \
++
++/* Initialise a list head to an empty list */
++#define YINIT_LIST_HEAD(p) \
++do { \
++ (p)->next = (p);\
++ (p)->prev = (p); \
+ } while (0)
+
+-/*
+- * Insert a new entry between two known consecutive entries.
+- *
+- * This is only for internal list manipulation where we know
+- * the prev/next entries already!
+- */
+-static __inline__ void __list_add(struct list_head *new,
+- struct list_head *prev,
+- struct list_head *next)
+-{
+- next->prev = new;
+- new->next = next;
+- new->prev = prev;
+- prev->next = new;
+-}
+
+-/**
+- * list_add - add a new entry
+- * @new: new entry to be added
+- * @head: list head to add it after
+- *
+- * Insert a new entry after the specified head.
+- * This is good for implementing stacks.
+- */
+-static __inline__ void list_add(struct list_head *new, struct list_head *head)
++/* Add an element to a list */
++static __inline__ void ylist_add(struct ylist_head *newEntry,
++ struct ylist_head *list)
+ {
+- __list_add(new, head, head->next);
+-}
++ struct ylist_head *listNext = list->next;
++
++ list->next = newEntry;
++ newEntry->prev = list;
++ newEntry->next = listNext;
++ listNext->prev = newEntry;
+
+-/**
+- * list_add_tail - add a new entry
+- * @new: new entry to be added
+- * @head: list head to add it before
+- *
+- * Insert a new entry before the specified head.
+- * This is useful for implementing queues.
+- */
+-static __inline__ void list_add_tail(struct list_head *new,
+- struct list_head *head)
+-{
+- __list_add(new, head->prev, head);
+ }
+
+-/*
+- * Delete a list entry by making the prev/next entries
+- * point to each other.
+- *
+- * This is only for internal list manipulation where we know
+- * the prev/next entries already!
+- */
+-static __inline__ void __list_del(struct list_head *prev,
+- struct list_head *next)
++static __inline__ void ylist_add_tail(struct ylist_head *newEntry,
++ struct ylist_head *list)
+ {
+- next->prev = prev;
+- prev->next = next;
++ struct ylist_head *listPrev = list->prev;
++
++ list->prev = newEntry;
++ newEntry->next = list;
++ newEntry->prev = listPrev;
++ listPrev->next = newEntry;
++
+ }
+
+-/**
+- * list_del - deletes entry from list.
+- * @entry: the element to delete from the list.
+- * Note: list_empty on entry does not return true after this, the entry is
+- * in an undefined state.
+- */
+-static __inline__ void list_del(struct list_head *entry)
++
++/* Take an element out of its current list, with or without
++ * reinitialising the links.of the entry*/
++static __inline__ void ylist_del(struct ylist_head *entry)
+ {
+- __list_del(entry->prev, entry->next);
++ struct ylist_head *listNext = entry->next;
++ struct ylist_head *listPrev = entry->prev;
++
++ listNext->prev = listPrev;
++ listPrev->next = listNext;
++
+ }
+
+-/**
+- * list_del_init - deletes entry from list and reinitialize it.
+- * @entry: the element to delete from the list.
+- */
+-static __inline__ void list_del_init(struct list_head *entry)
++static __inline__ void ylist_del_init(struct ylist_head *entry)
+ {
+- __list_del(entry->prev, entry->next);
+- INIT_LIST_HEAD(entry);
++ ylist_del(entry);
++ entry->next = entry->prev = entry;
+ }
+
+-/**
+- * list_empty - tests whether a list is empty
+- * @head: the list to test.
+- */
+-static __inline__ int list_empty(struct list_head *head)
++
++/* Test if the list is empty */
++static __inline__ int ylist_empty(struct ylist_head *entry)
+ {
+- return head->next == head;
++ return (entry->next == entry);
+ }
+
+-/**
+- * list_splice - join two lists
+- * @list: the new list to add.
+- * @head: the place to add it in the first list.
++
++/* ylist_entry takes a pointer to a list entry and offsets it to that
++ * we can find a pointer to the object it is embedded in.
+ */
+-static __inline__ void list_splice(struct list_head *list,
+- struct list_head *head)
+-{
+- struct list_head *first = list->next;
+
+- if (first != list) {
+- struct list_head *last = list->prev;
+- struct list_head *at = head->next;
+-
+- first->prev = head;
+- head->next = first;
+-
+- last->next = at;
+- at->prev = last;
+- }
+-}
+
+-/**
+- * list_entry - get the struct for this entry
+- * @ptr: the &struct list_head pointer.
+- * @type: the type of the struct this is embedded in.
+- * @member: the name of the list_struct within the struct.
+- */
+-#define list_entry(ptr, type, member) \
+- ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+-
+-/**
+- * list_for_each - iterate over a list
+- * @pos: the &struct list_head to use as a loop counter.
+- * @head: the head for your list.
+- */
+-#define list_for_each(pos, head) \
+- for (pos = (head)->next, prefetch(pos->next); pos != (head); \
+- pos = pos->next, prefetch(pos->next))
+-
+-/**
+- * list_for_each_safe - iterate over a list safe against removal
+- * of list entry
+- * @pos: the &struct list_head to use as a loop counter.
+- * @n: another &struct list_head to use as temporary storage
+- * @head: the head for your list.
+- */
+-#define list_for_each_safe(pos, n, head) \
+- for (pos = (head)->next, n = pos->next; pos != (head); \
+- pos = n, n = pos->next)
++#define ylist_entry(entry, type, member) \
++ ((type *)((char *)(entry)-(unsigned long)(&((type *)NULL)->member)))
+
+-/*
+- * File types
++
++/* ylist_for_each and list_for_each_safe iterate over lists.
++ * ylist_for_each_safe uses temporary storage to make the list delete safe
+ */
++
++#define ylist_for_each(itervar, list) \
++ for (itervar = (list)->next; itervar != (list); itervar = itervar->next)
++
++#define ylist_for_each_safe(itervar, saveVar, list) \
++ for (itervar = (list)->next, saveVar = (list)->next->next; \
++ itervar != (list); itervar = saveVar, saveVar = saveVar->next)
++
++
++#if !(defined __KERNEL__)
++
++
++#ifndef WIN32
++#include <sys/stat.h>
++#endif
++
++
++#ifdef CONFIG_YAFFS_PROVIDE_DEFS
++/* File types */
++
++
+ #define DT_UNKNOWN 0
+ #define DT_FIFO 1
+ #define DT_CHR 2
+@@ -212,6 +153,7 @@ static __inline__ void list_splice(struc
+ #define DT_SOCK 12
+ #define DT_WHT 14
+
++
+ #ifndef WIN32
+ #include <sys/stat.h>
+ #endif
+@@ -227,10 +169,6 @@ static __inline__ void list_splice(struc
+ #define ATTR_ATIME 16
+ #define ATTR_MTIME 32
+ #define ATTR_CTIME 64
+-#define ATTR_ATIME_SET 128
+-#define ATTR_MTIME_SET 256
+-#define ATTR_FORCE 512 /* Not a change, but a change it */
+-#define ATTR_ATTR_FLAG 1024
+
+ struct iattr {
+ unsigned int ia_valid;
+@@ -244,21 +182,15 @@ struct iattr {
+ unsigned int ia_attr_flags;
+ };
+
+-#define KERN_DEBUG
++#endif
+
+ #else
+
+-#ifndef WIN32
+ #include <linux/types.h>
+-#include <linux/list.h>
+ #include <linux/fs.h>
+ #include <linux/stat.h>
+-#endif
+
+ #endif
+
+-#if defined WIN32
+-#undef new
+-#endif
+
+ #endif
+--- a/fs/yaffs2/Kconfig
++++ b/fs/yaffs2/Kconfig
+@@ -5,7 +5,7 @@
+ config YAFFS_FS
+ tristate "YAFFS2 file system support"
+ default n
+- depends on MTD
++ depends on MTD_BLOCK
+ select YAFFS_YAFFS1
+ select YAFFS_YAFFS2
+ help
+@@ -43,7 +43,8 @@ config YAFFS_9BYTE_TAGS
+ format that you need to continue to support. New data written
+ also uses the older-style format. Note: Use of this option
+ generally requires that MTD's oob layout be adjusted to use the
+- older-style format. See notes on tags formats and MTD versions.
++ older-style format. See notes on tags formats and MTD versions
++ in yaffs_mtdif1.c.
+
+ If unsure, say N.
+
+@@ -109,26 +110,6 @@ config YAFFS_DISABLE_LAZY_LOAD
+
+ If unsure, say N.
+
+-config YAFFS_CHECKPOINT_RESERVED_BLOCKS
+- int "Reserved blocks for checkpointing"
+- depends on YAFFS_YAFFS2
+- default 10
+- help
+- Give the number of Blocks to reserve for checkpointing.
+- Checkpointing saves the state at unmount so that mounting is
+- much faster as a scan of all the flash to regenerate this state
+- is not needed. These Blocks are reserved per partition, so if
+- you have very small partitions the default (10) may be a mess
+- for you. You can set this value to 0, but that does not mean
+- checkpointing is disabled at all. There only won't be any
+- specially reserved blocks for checkpointing, so if there is
+- enough free space on the filesystem, it will be used for
+- checkpointing.
+-
+- If unsure, leave at default (10), but don't wonder if there are
+- always 2MB used on your large page device partition (10 x 2k
+- pagesize). When using small partitions or when being very small
+- on space, you probably want to set this to zero.
+
+ config YAFFS_DISABLE_WIDE_TNODES
+ bool "Turn off wide tnodes"
+--- a/fs/yaffs2/Makefile
++++ b/fs/yaffs2/Makefile
+@@ -5,7 +5,6 @@
+ obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+ yaffs-y := yaffs_ecc.o yaffs_fs.o yaffs_guts.o yaffs_checkptrw.o
+-yaffs-y += yaffs_packedtags2.o yaffs_nand.o yaffs_qsort.o
++yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o yaffs_qsort.o
+ yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o
+-yaffs-y += yaffs_mtdif1.o yaffs_packedtags1.o
+-yaffs-y += yaffs_mtdif.o yaffs_mtdif2.o
++yaffs-y += yaffs_mtdif.o yaffs_mtdif1.o yaffs_mtdif2.o
+--- a/fs/yaffs2/moduleconfig.h
++++ b/fs/yaffs2/moduleconfig.h
+@@ -27,12 +27,12 @@
+
+ /* Default: Not selected */
+ /* Meaning: Yaffs does its own ECC, rather than using MTD ECC */
+-//#define CONFIG_YAFFS_DOES_ECC
++/* #define CONFIG_YAFFS_DOES_ECC */
+
+ /* Default: Not selected */
+ /* Meaning: ECC byte order is 'wrong'. Only meaningful if */
+ /* CONFIG_YAFFS_DOES_ECC is set */
+-//#define CONFIG_YAFFS_ECC_WRONG_ORDER
++/* #define CONFIG_YAFFS_ECC_WRONG_ORDER */
+
+ /* Default: Selected */
+ /* Meaning: Disables testing whether chunks are erased before writing to them*/
+@@ -54,11 +54,11 @@ that you need to continue to support. N
+ older-style format.
+ Note: Use of this option generally requires that MTD's oob layout be
+ adjusted to use the older-style format. See notes on tags formats and
+-MTD versions.
++MTD versions in yaffs_mtdif1.c.
+ */
+ /* Default: Not selected */
+ /* Meaning: Use older-style on-NAND data format with pageStatus byte */
+-#define CONFIG_YAFFS_9BYTE_TAGS
++/* #define CONFIG_YAFFS_9BYTE_TAGS */
+
+ #endif /* YAFFS_OUT_OF_TREE */
+
+--- a/fs/yaffs2/yaffs_checkptrw.c
++++ b/fs/yaffs2/yaffs_checkptrw.c
+@@ -12,48 +12,43 @@
+ */
+
+ const char *yaffs_checkptrw_c_version =
+- "$Id: yaffs_checkptrw.c,v 1.14 2007-05-15 20:07:40 charles Exp $";
++ "$Id: yaffs_checkptrw.c,v 1.18 2009-03-06 17:20:49 wookey Exp $";
+
+
+ #include "yaffs_checkptrw.h"
+-
++#include "yaffs_getblockinfo.h"
+
+ static int yaffs_CheckpointSpaceOk(yaffs_Device *dev)
+ {
+-
+ int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
+
+ T(YAFFS_TRACE_CHECKPOINT,
+ (TSTR("checkpt blocks available = %d" TENDSTR),
+ blocksAvailable));
+
+-
+ return (blocksAvailable <= 0) ? 0 : 1;
+ }
+
+
+ static int yaffs_CheckpointErase(yaffs_Device *dev)
+ {
+-
+ int i;
+
+-
+- if(!dev->eraseBlockInNAND)
++ if (!dev->eraseBlockInNAND)
+ return 0;
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("checking blocks %d to %d"TENDSTR),
+- dev->internalStartBlock,dev->internalEndBlock));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checking blocks %d to %d"TENDSTR),
++ dev->internalStartBlock, dev->internalEndBlock));
+
+- for(i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
+- if(bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("erasing checkpt block %d"TENDSTR),i));
+- if(dev->eraseBlockInNAND(dev,i- dev->blockOffset /* realign */)){
++ for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
++ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
++ if (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("erasing checkpt block %d"TENDSTR), i));
++ if (dev->eraseBlockInNAND(dev, i - dev->blockOffset /* realign */)) {
+ bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
+ dev->nErasedBlocks++;
+ dev->nFreeChunks += dev->nChunksPerBlock;
+- }
+- else {
+- dev->markNANDBlockBad(dev,i);
++ } else {
++ dev->markNANDBlockBad(dev, i);
+ bi->blockState = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+@@ -71,23 +66,23 @@ static void yaffs_CheckpointFindNextEras
+ int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
+ T(YAFFS_TRACE_CHECKPOINT,
+ (TSTR("allocating checkpt block: erased %d reserved %d avail %d next %d "TENDSTR),
+- dev->nErasedBlocks,dev->nReservedBlocks,blocksAvailable,dev->checkpointNextBlock));
++ dev->nErasedBlocks, dev->nReservedBlocks, blocksAvailable, dev->checkpointNextBlock));
+
+- if(dev->checkpointNextBlock >= 0 &&
+- dev->checkpointNextBlock <= dev->internalEndBlock &&
+- blocksAvailable > 0){
+-
+- for(i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++){
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
+- if(bi->blockState == YAFFS_BLOCK_STATE_EMPTY){
++ if (dev->checkpointNextBlock >= 0 &&
++ dev->checkpointNextBlock <= dev->internalEndBlock &&
++ blocksAvailable > 0) {
++
++ for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) {
++ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
++ if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) {
+ dev->checkpointNextBlock = i + 1;
+ dev->checkpointCurrentBlock = i;
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("allocating checkpt block %d"TENDSTR),i));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("allocating checkpt block %d"TENDSTR), i));
+ return;
+ }
+ }
+ }
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("out of checkpt blocks"TENDSTR)));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("out of checkpt blocks"TENDSTR)));
+
+ dev->checkpointNextBlock = -1;
+ dev->checkpointCurrentBlock = -1;
+@@ -98,30 +93,31 @@ static void yaffs_CheckpointFindNextChec
+ int i;
+ yaffs_ExtendedTags tags;
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("find next checkpt block: start: blocks %d next %d" TENDSTR),
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: start: blocks %d next %d" TENDSTR),
+ dev->blocksInCheckpoint, dev->checkpointNextBlock));
+
+- if(dev->blocksInCheckpoint < dev->checkpointMaxBlocks)
+- for(i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++){
++ if (dev->blocksInCheckpoint < dev->checkpointMaxBlocks)
++ for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) {
+ int chunk = i * dev->nChunksPerBlock;
+ int realignedChunk = chunk - dev->chunkOffset;
+
+- dev->readChunkWithTagsFromNAND(dev,realignedChunk,NULL,&tags);
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("find next checkpt block: search: block %d oid %d seq %d eccr %d" TENDSTR),
+- i, tags.objectId,tags.sequenceNumber,tags.eccResult));
++ dev->readChunkWithTagsFromNAND(dev, realignedChunk,
++ NULL, &tags);
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: search: block %d oid %d seq %d eccr %d" TENDSTR),
++ i, tags.objectId, tags.sequenceNumber, tags.eccResult));
+
+- if(tags.sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA){
++ if (tags.sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ /* Right kind of block */
+ dev->checkpointNextBlock = tags.objectId;
+ dev->checkpointCurrentBlock = i;
+ dev->checkpointBlockList[dev->blocksInCheckpoint] = i;
+ dev->blocksInCheckpoint++;
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("found checkpt block %d"TENDSTR),i));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("found checkpt block %d"TENDSTR), i));
+ return;
+ }
+ }
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("found no more checkpt blocks"TENDSTR)));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("found no more checkpt blocks"TENDSTR)));
+
+ dev->checkpointNextBlock = -1;
+ dev->checkpointCurrentBlock = -1;
+@@ -133,17 +129,17 @@ int yaffs_CheckpointOpen(yaffs_Device *d
+
+ /* Got the functions we need? */
+ if (!dev->writeChunkWithTagsToNAND ||
+- !dev->readChunkWithTagsFromNAND ||
+- !dev->eraseBlockInNAND ||
+- !dev->markNANDBlockBad)
++ !dev->readChunkWithTagsFromNAND ||
++ !dev->eraseBlockInNAND ||
++ !dev->markNANDBlockBad)
+ return 0;
+
+- if(forWriting && !yaffs_CheckpointSpaceOk(dev))
++ if (forWriting && !yaffs_CheckpointSpaceOk(dev))
+ return 0;
+
+- if(!dev->checkpointBuffer)
+- dev->checkpointBuffer = YMALLOC_DMA(dev->nDataBytesPerChunk);
+- if(!dev->checkpointBuffer)
++ if (!dev->checkpointBuffer)
++ dev->checkpointBuffer = YMALLOC_DMA(dev->totalBytesPerChunk);
++ if (!dev->checkpointBuffer)
+ return 0;
+
+
+@@ -159,12 +155,10 @@ int yaffs_CheckpointOpen(yaffs_Device *d
+ dev->checkpointNextBlock = dev->internalStartBlock;
+
+ /* Erase all the blocks in the checkpoint area */
+- if(forWriting){
+- memset(dev->checkpointBuffer,0,dev->nDataBytesPerChunk);
++ if (forWriting) {
++ memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk);
+ dev->checkpointByteOffset = 0;
+ return yaffs_CheckpointErase(dev);
+-
+-
+ } else {
+ int i;
+ /* Set to a value that will kick off a read */
+@@ -174,7 +168,7 @@ int yaffs_CheckpointOpen(yaffs_Device *d
+ dev->blocksInCheckpoint = 0;
+ dev->checkpointMaxBlocks = (dev->internalEndBlock - dev->internalStartBlock)/16 + 2;
+ dev->checkpointBlockList = YMALLOC(sizeof(int) * dev->checkpointMaxBlocks);
+- for(i = 0; i < dev->checkpointMaxBlocks; i++)
++ for (i = 0; i < dev->checkpointMaxBlocks; i++)
+ dev->checkpointBlockList[i] = -1;
+ }
+
+@@ -191,18 +185,17 @@ int yaffs_GetCheckpointSum(yaffs_Device
+
+ static int yaffs_CheckpointFlushBuffer(yaffs_Device *dev)
+ {
+-
+ int chunk;
+ int realignedChunk;
+
+ yaffs_ExtendedTags tags;
+
+- if(dev->checkpointCurrentBlock < 0){
++ if (dev->checkpointCurrentBlock < 0) {
+ yaffs_CheckpointFindNextErasedBlock(dev);
+ dev->checkpointCurrentChunk = 0;
+ }
+
+- if(dev->checkpointCurrentBlock < 0)
++ if (dev->checkpointCurrentBlock < 0)
+ return 0;
+
+ tags.chunkDeleted = 0;
+@@ -210,10 +203,10 @@ static int yaffs_CheckpointFlushBuffer(y
+ tags.chunkId = dev->checkpointPageSequence + 1;
+ tags.sequenceNumber = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+ tags.byteCount = dev->nDataBytesPerChunk;
+- if(dev->checkpointCurrentChunk == 0){
++ if (dev->checkpointCurrentChunk == 0) {
+ /* First chunk we write for the block? Set block state to
+ checkpoint */
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,dev->checkpointCurrentBlock);
++ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->checkpointCurrentBlock);
+ bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
+ dev->blocksInCheckpoint++;
+ }
+@@ -221,28 +214,29 @@ static int yaffs_CheckpointFlushBuffer(y
+ chunk = dev->checkpointCurrentBlock * dev->nChunksPerBlock + dev->checkpointCurrentChunk;
+
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint wite buffer nand %d(%d:%d) objid %d chId %d" TENDSTR),
+- chunk, dev->checkpointCurrentBlock, dev->checkpointCurrentChunk,tags.objectId,tags.chunkId));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint wite buffer nand %d(%d:%d) objid %d chId %d" TENDSTR),
++ chunk, dev->checkpointCurrentBlock, dev->checkpointCurrentChunk, tags.objectId, tags.chunkId));
+
+ realignedChunk = chunk - dev->chunkOffset;
+
+- dev->writeChunkWithTagsToNAND(dev,realignedChunk,dev->checkpointBuffer,&tags);
++ dev->writeChunkWithTagsToNAND(dev, realignedChunk,
++ dev->checkpointBuffer, &tags);
+ dev->checkpointByteOffset = 0;
+ dev->checkpointPageSequence++;
+ dev->checkpointCurrentChunk++;
+- if(dev->checkpointCurrentChunk >= dev->nChunksPerBlock){
++ if (dev->checkpointCurrentChunk >= dev->nChunksPerBlock) {
+ dev->checkpointCurrentChunk = 0;
+ dev->checkpointCurrentBlock = -1;
+ }
+- memset(dev->checkpointBuffer,0,dev->nDataBytesPerChunk);
++ memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk);
+
+ return 1;
+ }
+
+
+-int yaffs_CheckpointWrite(yaffs_Device *dev,const void *data, int nBytes)
++int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes)
+ {
+- int i=0;
++ int i = 0;
+ int ok = 1;
+
+
+@@ -250,17 +244,14 @@ int yaffs_CheckpointWrite(yaffs_Device *
+
+
+
+- if(!dev->checkpointBuffer)
++ if (!dev->checkpointBuffer)
+ return 0;
+
+- if(!dev->checkpointOpenForWrite)
++ if (!dev->checkpointOpenForWrite)
+ return -1;
+
+- while(i < nBytes && ok) {
+-
+-
+-
+- dev->checkpointBuffer[dev->checkpointByteOffset] = *dataBytes ;
++ while (i < nBytes && ok) {
++ dev->checkpointBuffer[dev->checkpointByteOffset] = *dataBytes;
+ dev->checkpointSum += *dataBytes;
+ dev->checkpointXor ^= *dataBytes;
+
+@@ -270,18 +261,17 @@ int yaffs_CheckpointWrite(yaffs_Device *
+ dev->checkpointByteCount++;
+
+
+- if(dev->checkpointByteOffset < 0 ||
++ if (dev->checkpointByteOffset < 0 ||
+ dev->checkpointByteOffset >= dev->nDataBytesPerChunk)
+ ok = yaffs_CheckpointFlushBuffer(dev);
+-
+ }
+
+- return i;
++ return i;
+ }
+
+ int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes)
+ {
+- int i=0;
++ int i = 0;
+ int ok = 1;
+ yaffs_ExtendedTags tags;
+
+@@ -291,52 +281,54 @@ int yaffs_CheckpointRead(yaffs_Device *d
+
+ __u8 *dataBytes = (__u8 *)data;
+
+- if(!dev->checkpointBuffer)
++ if (!dev->checkpointBuffer)
+ return 0;
+
+- if(dev->checkpointOpenForWrite)
++ if (dev->checkpointOpenForWrite)
+ return -1;
+
+- while(i < nBytes && ok) {
++ while (i < nBytes && ok) {
+
+
+- if(dev->checkpointByteOffset < 0 ||
+- dev->checkpointByteOffset >= dev->nDataBytesPerChunk) {
++ if (dev->checkpointByteOffset < 0 ||
++ dev->checkpointByteOffset >= dev->nDataBytesPerChunk) {
+
+- if(dev->checkpointCurrentBlock < 0){
++ if (dev->checkpointCurrentBlock < 0) {
+ yaffs_CheckpointFindNextCheckpointBlock(dev);
+ dev->checkpointCurrentChunk = 0;
+ }
+
+- if(dev->checkpointCurrentBlock < 0)
++ if (dev->checkpointCurrentBlock < 0)
+ ok = 0;
+ else {
+-
+- chunk = dev->checkpointCurrentBlock * dev->nChunksPerBlock +
+- dev->checkpointCurrentChunk;
++ chunk = dev->checkpointCurrentBlock *
++ dev->nChunksPerBlock +
++ dev->checkpointCurrentChunk;
+
+ realignedChunk = chunk - dev->chunkOffset;
+
+- /* read in the next chunk */
+- /* printf("read checkpoint page %d\n",dev->checkpointPage); */
+- dev->readChunkWithTagsFromNAND(dev, realignedChunk,
+- dev->checkpointBuffer,
+- &tags);
+-
+- if(tags.chunkId != (dev->checkpointPageSequence + 1) ||
+- tags.sequenceNumber != YAFFS_SEQUENCE_CHECKPOINT_DATA)
+- ok = 0;
++ /* read in the next chunk */
++ /* printf("read checkpoint page %d\n",dev->checkpointPage); */
++ dev->readChunkWithTagsFromNAND(dev,
++ realignedChunk,
++ dev->checkpointBuffer,
++ &tags);
++
++ if (tags.chunkId != (dev->checkpointPageSequence + 1) ||
++ tags.eccResult > YAFFS_ECC_RESULT_FIXED ||
++ tags.sequenceNumber != YAFFS_SEQUENCE_CHECKPOINT_DATA)
++ ok = 0;
+
+ dev->checkpointByteOffset = 0;
+ dev->checkpointPageSequence++;
+ dev->checkpointCurrentChunk++;
+
+- if(dev->checkpointCurrentChunk >= dev->nChunksPerBlock)
++ if (dev->checkpointCurrentChunk >= dev->nChunksPerBlock)
+ dev->checkpointCurrentBlock = -1;
+ }
+ }
+
+- if(ok){
++ if (ok) {
+ *dataBytes = dev->checkpointBuffer[dev->checkpointByteOffset];
+ dev->checkpointSum += *dataBytes;
+ dev->checkpointXor ^= *dataBytes;
+@@ -353,17 +345,17 @@ int yaffs_CheckpointRead(yaffs_Device *d
+ int yaffs_CheckpointClose(yaffs_Device *dev)
+ {
+
+- if(dev->checkpointOpenForWrite){
+- if(dev->checkpointByteOffset != 0)
++ if (dev->checkpointOpenForWrite) {
++ if (dev->checkpointByteOffset != 0)
+ yaffs_CheckpointFlushBuffer(dev);
+ } else {
+ int i;
+- for(i = 0; i < dev->blocksInCheckpoint && dev->checkpointBlockList[i] >= 0; i++){
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,dev->checkpointBlockList[i]);
+- if(bi->blockState == YAFFS_BLOCK_STATE_EMPTY)
++ for (i = 0; i < dev->blocksInCheckpoint && dev->checkpointBlockList[i] >= 0; i++) {
++ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->checkpointBlockList[i]);
++ if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY)
+ bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
+ else {
+- // Todo this looks odd...
++ /* Todo this looks odd... */
+ }
+ }
+ YFREE(dev->checkpointBlockList);
+@@ -374,27 +366,25 @@ int yaffs_CheckpointClose(yaffs_Device *
+ dev->nErasedBlocks -= dev->blocksInCheckpoint;
+
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint byte count %d" TENDSTR),
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint byte count %d" TENDSTR),
+ dev->checkpointByteCount));
+
+- if(dev->checkpointBuffer){
++ if (dev->checkpointBuffer) {
+ /* free the buffer */
+ YFREE(dev->checkpointBuffer);
+ dev->checkpointBuffer = NULL;
+ return 1;
+- }
+- else
++ } else
+ return 0;
+-
+ }
+
+ int yaffs_CheckpointInvalidateStream(yaffs_Device *dev)
+ {
+ /* Erase the first checksum block */
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint invalidate"TENDSTR)));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint invalidate"TENDSTR)));
+
+- if(!yaffs_CheckpointSpaceOk(dev))
++ if (!yaffs_CheckpointSpaceOk(dev))
+ return 0;
+
+ return yaffs_CheckpointErase(dev);
+--- a/fs/yaffs2/yaffs_checkptrw.h
++++ b/fs/yaffs2/yaffs_checkptrw.h
+@@ -20,9 +20,9 @@
+
+ int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting);
+
+-int yaffs_CheckpointWrite(yaffs_Device *dev,const void *data, int nBytes);
++int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes);
+
+-int yaffs_CheckpointRead(yaffs_Device *dev,void *data, int nBytes);
++int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes);
+
+ int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum);
+
+--- a/fs/yaffs2/yaffs_ecc.c
++++ b/fs/yaffs2/yaffs_ecc.c
+@@ -29,7 +29,7 @@
+ */
+
+ const char *yaffs_ecc_c_version =
+- "$Id: yaffs_ecc.c,v 1.9 2007-02-14 01:09:06 wookey Exp $";
++ "$Id: yaffs_ecc.c,v 1.11 2009-03-06 17:20:50 wookey Exp $";
+
+ #include "yportenv.h"
+
+@@ -109,12 +109,10 @@ void yaffs_ECCCalculate(const unsigned c
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+- if (b & 0x01) // odd number of bits in the byte
+- {
++ if (b & 0x01) { /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+-
+ }
+
+ ecc[2] = (~col_parity) | 0x03;
+@@ -158,7 +156,7 @@ void yaffs_ECCCalculate(const unsigned c
+ ecc[0] = ~t;
+
+ #ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+- // Swap the bytes into the wrong order
++ /* Swap the bytes into the wrong order */
+ t = ecc[0];
+ ecc[0] = ecc[1];
+ ecc[1] = t;
+@@ -189,7 +187,7 @@ int yaffs_ECCCorrect(unsigned char *data
+ unsigned bit;
+
+ #ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+- // swap the bytes to correct for the wrong order
++ /* swap the bytes to correct for the wrong order */
+ unsigned char t;
+
+ t = d0;
+@@ -251,7 +249,7 @@ int yaffs_ECCCorrect(unsigned char *data
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+ void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
+- yaffs_ECCOther * eccOther)
++ yaffs_ECCOther *eccOther)
+ {
+ unsigned int i;
+
+@@ -278,8 +276,8 @@ void yaffs_ECCCalculateOther(const unsig
+ }
+
+ int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
+- yaffs_ECCOther * read_ecc,
+- const yaffs_ECCOther * test_ecc)
++ yaffs_ECCOther *read_ecc,
++ const yaffs_ECCOther *test_ecc)
+ {
+ unsigned char cDelta; /* column parity delta */
+ unsigned lDelta; /* line parity delta */
+@@ -294,8 +292,7 @@ int yaffs_ECCCorrectOther(unsigned char
+ return 0; /* no error */
+
+ if (lDelta == ~lDeltaPrime &&
+- (((cDelta ^ (cDelta >> 1)) & 0x15) == 0x15))
+- {
++ (((cDelta ^ (cDelta >> 1)) & 0x15) == 0x15)) {
+ /* Single bit (recoverable) error in data */
+
+ bit = 0;
+@@ -307,7 +304,7 @@ int yaffs_ECCCorrectOther(unsigned char
+ if (cDelta & 0x02)
+ bit |= 0x01;
+
+- if(lDelta >= nBytes)
++ if (lDelta >= nBytes)
+ return -1;
+
+ data[lDelta] ^= (1 << bit);
+@@ -316,7 +313,7 @@ int yaffs_ECCCorrectOther(unsigned char
+ }
+
+ if ((yaffs_CountBits32(lDelta) + yaffs_CountBits32(lDeltaPrime) +
+- yaffs_CountBits(cDelta)) == 1) {
++ yaffs_CountBits(cDelta)) == 1) {
+ /* Reccoverable error in ecc */
+
+ *read_ecc = *test_ecc;
+@@ -326,6 +323,4 @@ int yaffs_ECCCorrectOther(unsigned char
+ /* Unrecoverable error */
+
+ return -1;
+-
+ }
+-
+--- a/fs/yaffs2/yaffs_ecc.h
++++ b/fs/yaffs2/yaffs_ecc.h
+@@ -13,15 +13,15 @@
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+- /*
+- * This code implements the ECC algorithm used in SmartMedia.
+- *
+- * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+- * The two unused bit are set to 1.
+- * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+- * blocks are used on a 512-byte NAND page.
+- *
+- */
++/*
++ * This code implements the ECC algorithm used in SmartMedia.
++ *
++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
++ * The two unused bit are set to 1.
++ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
++ * blocks are used on a 512-byte NAND page.
++ *
++ */
+
+ #ifndef __YAFFS_ECC_H__
+ #define __YAFFS_ECC_H__
+@@ -34,11 +34,11 @@ typedef struct {
+
+ void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc);
+ int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
+- const unsigned char *test_ecc);
++ const unsigned char *test_ecc);
+
+ void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
+- yaffs_ECCOther * ecc);
++ yaffs_ECCOther *ecc);
+ int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
+- yaffs_ECCOther * read_ecc,
+- const yaffs_ECCOther * test_ecc);
++ yaffs_ECCOther *read_ecc,
++ const yaffs_ECCOther *test_ecc);
+ #endif
+--- a/fs/yaffs2/yaffs_fs.c
++++ b/fs/yaffs2/yaffs_fs.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2009 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -32,18 +32,17 @@
+ */
+
+ const char *yaffs_fs_c_version =
+- "$Id: yaffs_fs.c,v 1.63 2007-09-19 20:35:40 imcd Exp $";
++ "$Id: yaffs_fs.c,v 1.79 2009-03-17 01:12:00 wookey Exp $";
+ extern const char *yaffs_guts_c_version;
+
+ #include <linux/version.h>
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+ #include <linux/config.h>
+ #endif
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/init.h>
+-#include <linux/list.h>
+ #include <linux/fs.h>
+ #include <linux/proc_fs.h>
+ #include <linux/smp_lock.h>
+@@ -53,10 +52,12 @@ extern const char *yaffs_guts_c_version;
+ #include <linux/string.h>
+ #include <linux/ctype.h>
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#include "asm/div64.h"
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+ #include <linux/statfs.h> /* Added NCB 15-8-2003 */
+-#include <asm/statfs.h>
++#include <linux/statfs.h>
+ #define UnlockPage(p) unlock_page(p)
+ #define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+
+@@ -69,22 +70,45 @@ extern const char *yaffs_guts_c_version;
+ #define BDEVNAME_SIZE 0
+ #define yaffs_devname(sb, buf) kdevname(sb->s_dev)
+
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
+ /* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
+ #define __user
+ #endif
+
+ #endif
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
++#define YPROC_ROOT (&proc_root)
++#else
++#define YPROC_ROOT NULL
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ #define WRITE_SIZE_STR "writesize"
+-#define WRITE_SIZE(mtd) (mtd)->writesize
++#define WRITE_SIZE(mtd) ((mtd)->writesize)
+ #else
+ #define WRITE_SIZE_STR "oobblock"
+-#define WRITE_SIZE(mtd) (mtd)->oobblock
++#define WRITE_SIZE(mtd) ((mtd)->oobblock)
+ #endif
+
+-#include <asm/uaccess.h>
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
++#define YAFFS_USE_WRITE_BEGIN_END 1
++#else
++#define YAFFS_USE_WRITE_BEGIN_END 0
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
++static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
++{
++ uint64_t result = partition_size;
++ do_div(result, block_size);
++ return (uint32_t)result;
++}
++#else
++#define YCALCBLOCKS(s, b) ((s)/(b))
++#endif
++
++#include <linux/uaccess.h>
+
+ #include "yportenv.h"
+ #include "yaffs_guts.h"
+@@ -96,28 +120,44 @@ extern const char *yaffs_guts_c_version;
+
+ unsigned int yaffs_traceMask = YAFFS_TRACE_BAD_BLOCKS;
+ unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
++unsigned int yaffs_auto_checkpoint = 1;
+
+ /* Module Parameters */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+-module_param(yaffs_traceMask,uint,0644);
+-module_param(yaffs_wr_attempts,uint,0644);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++module_param(yaffs_traceMask, uint, 0644);
++module_param(yaffs_wr_attempts, uint, 0644);
++module_param(yaffs_auto_checkpoint, uint, 0644);
++#else
++MODULE_PARM(yaffs_traceMask, "i");
++MODULE_PARM(yaffs_wr_attempts, "i");
++MODULE_PARM(yaffs_auto_checkpoint, "i");
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
++/* use iget and read_inode */
++#define Y_IGET(sb, inum) iget((sb), (inum))
++static void yaffs_read_inode(struct inode *inode);
++
+ #else
+-MODULE_PARM(yaffs_traceMask,"i");
+-MODULE_PARM(yaffs_wr_attempts,"i");
++/* Call local equivalent */
++#define YAFFS_USE_OWN_IGET
++#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino);
+ #endif
+
+ /*#define T(x) printk x */
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18))
+-#define yaffs_InodeToObjectLV(iptr) (iptr)->i_private
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++#define yaffs_InodeToObjectLV(iptr) ((iptr)->i_private)
+ #else
+-#define yaffs_InodeToObjectLV(iptr) (iptr)->u.generic_ip
++#define yaffs_InodeToObjectLV(iptr) ((iptr)->u.generic_ip)
+ #endif
+
+ #define yaffs_InodeToObject(iptr) ((yaffs_Object *)(yaffs_InodeToObjectLV(iptr)))
+ #define yaffs_DentryToObject(dptr) yaffs_InodeToObject((dptr)->d_inode)
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ #define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->s_fs_info)
+ #else
+ #define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->u.generic_sbp)
+@@ -126,47 +166,49 @@ MODULE_PARM(yaffs_wr_attempts,"i");
+ static void yaffs_put_super(struct super_block *sb);
+
+ static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+- loff_t * pos);
++ loff_t *pos);
++static ssize_t yaffs_hold_space(struct file *f);
++static void yaffs_release_space(struct file *f);
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_file_flush(struct file *file, fl_owner_t id);
+ #else
+ static int yaffs_file_flush(struct file *file);
+ #endif
+
+ static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+- int datasync);
++ int datasync);
+
+ static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n);
+ static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+- struct nameidata *n);
++ struct nameidata *n);
+ #else
+ static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode);
+ static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry);
+ #endif
+ static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+- struct dentry *dentry);
++ struct dentry *dentry);
+ static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
+ static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+- const char *symname);
++ const char *symname);
+ static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- dev_t dev);
++ dev_t dev);
+ #else
+ static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- int dev);
++ int dev);
+ #endif
+ static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+ static int yaffs_setattr(struct dentry *dentry, struct iattr *attr);
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_sync_fs(struct super_block *sb, int wait);
+ static void yaffs_write_super(struct super_block *sb);
+ #else
+@@ -174,33 +216,47 @@ static int yaffs_sync_fs(struct super_bl
+ static int yaffs_write_super(struct super_block *sb);
+ #endif
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf);
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf);
+ #else
+ static int yaffs_statfs(struct super_block *sb, struct statfs *buf);
+ #endif
+-static void yaffs_read_inode(struct inode *inode);
+
++#ifdef YAFFS_HAS_PUT_INODE
+ static void yaffs_put_inode(struct inode *inode);
++#endif
++
+ static void yaffs_delete_inode(struct inode *);
+ static void yaffs_clear_inode(struct inode *);
+
+ static int yaffs_readpage(struct file *file, struct page *page);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_writepage(struct page *page, struct writeback_control *wbc);
+ #else
+ static int yaffs_writepage(struct page *page);
+ #endif
++
++
++#if (YAFFS_USE_WRITE_BEGIN_END != 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned flags,
++ struct page **pagep, void **fsdata);
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned copied,
++ struct page *pg, void *fsdadata);
++#else
+ static int yaffs_prepare_write(struct file *f, struct page *pg,
+- unsigned offset, unsigned to);
++ unsigned offset, unsigned to);
+ static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+- unsigned to);
++ unsigned to);
+
+-static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+- int buflen);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
++#endif
++
++static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
++ int buflen);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+ static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
+ #else
+ static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
+@@ -209,12 +265,17 @@ static int yaffs_follow_link(struct dent
+ static struct address_space_operations yaffs_file_address_operations = {
+ .readpage = yaffs_readpage,
+ .writepage = yaffs_writepage,
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++ .write_begin = yaffs_write_begin,
++ .write_end = yaffs_write_end,
++#else
+ .prepare_write = yaffs_prepare_write,
+ .commit_write = yaffs_commit_write,
++#endif
+ };
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
+-static struct file_operations yaffs_file_operations = {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
++static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+@@ -224,11 +285,12 @@ static struct file_operations yaffs_file
+ .fsync = yaffs_sync_object,
+ .splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
++ .llseek = generic_file_llseek,
+ };
+
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18))
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+
+-static struct file_operations yaffs_file_operations = {
++static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+@@ -241,29 +303,29 @@ static struct file_operations yaffs_file
+
+ #else
+
+-static struct file_operations yaffs_file_operations = {
++static const struct file_operations yaffs_file_operations = {
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ .sendfile = generic_file_sendfile,
+ #endif
+ };
+ #endif
+
+-static struct inode_operations yaffs_file_inode_operations = {
++static const struct inode_operations yaffs_file_inode_operations = {
+ .setattr = yaffs_setattr,
+ };
+
+-static struct inode_operations yaffs_symlink_inode_operations = {
++static const struct inode_operations yaffs_symlink_inode_operations = {
+ .readlink = yaffs_readlink,
+ .follow_link = yaffs_follow_link,
+ .setattr = yaffs_setattr,
+ };
+
+-static struct inode_operations yaffs_dir_inode_operations = {
++static const struct inode_operations yaffs_dir_inode_operations = {
+ .create = yaffs_create,
+ .lookup = yaffs_lookup,
+ .link = yaffs_link,
+@@ -276,16 +338,21 @@ static struct inode_operations yaffs_dir
+ .setattr = yaffs_setattr,
+ };
+
+-static struct file_operations yaffs_dir_operations = {
++static const struct file_operations yaffs_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = yaffs_readdir,
+ .fsync = yaffs_sync_object,
+ };
+
+-static struct super_operations yaffs_super_ops = {
++static const struct super_operations yaffs_super_ops = {
+ .statfs = yaffs_statfs,
++
++#ifndef YAFFS_USE_OWN_IGET
+ .read_inode = yaffs_read_inode,
++#endif
++#ifdef YAFFS_HAS_PUT_INODE
+ .put_inode = yaffs_put_inode,
++#endif
+ .put_super = yaffs_put_super,
+ .delete_inode = yaffs_delete_inode,
+ .clear_inode = yaffs_clear_inode,
+@@ -293,22 +360,21 @@ static struct super_operations yaffs_sup
+ .write_super = yaffs_write_super,
+ };
+
+-static void yaffs_GrossLock(yaffs_Device * dev)
++static void yaffs_GrossLock(yaffs_Device *dev)
+ {
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs locking\n"));
+-
++ T(YAFFS_TRACE_OS, ("yaffs locking %p\n", current));
+ down(&dev->grossLock);
++ T(YAFFS_TRACE_OS, ("yaffs locked %p\n", current));
+ }
+
+-static void yaffs_GrossUnlock(yaffs_Device * dev)
++static void yaffs_GrossUnlock(yaffs_Device *dev)
+ {
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs unlocking\n"));
++ T(YAFFS_TRACE_OS, ("yaffs unlocking %p\n", current));
+ up(&dev->grossLock);
+-
+ }
+
+-static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+- int buflen)
++static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
++ int buflen)
+ {
+ unsigned char *alias;
+ int ret;
+@@ -329,7 +395,7 @@ static int yaffs_readlink(struct dentry
+ return ret;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+ static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+ #else
+ static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+@@ -345,32 +411,31 @@ static int yaffs_follow_link(struct dent
+
+ yaffs_GrossUnlock(dev);
+
+- if (!alias)
+- {
++ if (!alias) {
+ ret = -ENOMEM;
+ goto out;
+- }
++ }
+
+ ret = vfs_follow_link(nd, alias);
+ kfree(alias);
+ out:
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
+- return ERR_PTR (ret);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++ return ERR_PTR(ret);
+ #else
+ return ret;
+ #endif
+ }
+
+ struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+- yaffs_Object * obj);
++ yaffs_Object *obj);
+
+ /*
+ * Lookup is used to find objects in the fs
+ */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+ static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+- struct nameidata *n)
++ struct nameidata *n)
+ #else
+ static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
+ #endif
+@@ -383,12 +448,11 @@ static struct dentry *yaffs_lookup(struc
+ yaffs_GrossLock(dev);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_lookup for %d:%s\n",
+- yaffs_InodeToObject(dir)->objectId, dentry->d_name.name));
++ ("yaffs_lookup for %d:%s\n",
++ yaffs_InodeToObject(dir)->objectId, dentry->d_name.name));
+
+- obj =
+- yaffs_FindObjectByName(yaffs_InodeToObject(dir),
+- dentry->d_name.name);
++ obj = yaffs_FindObjectByName(yaffs_InodeToObject(dir),
++ dentry->d_name.name);
+
+ obj = yaffs_GetEquivalentObject(obj); /* in case it was a hardlink */
+
+@@ -397,13 +461,13 @@ static struct dentry *yaffs_lookup(struc
+
+ if (obj) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_lookup found %d\n", obj->objectId));
++ ("yaffs_lookup found %d\n", obj->objectId));
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+
+ if (inode) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_loookup dentry \n"));
++ ("yaffs_loookup dentry \n"));
+ /* #if 0 asserted by NCB for 2.5/6 compatability - falls through to
+ * d_add even if NULL inode */
+ #if 0
+@@ -416,7 +480,7 @@ static struct dentry *yaffs_lookup(struc
+ }
+
+ } else {
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_lookup not found\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_lookup not found\n"));
+
+ }
+
+@@ -425,20 +489,22 @@ static struct dentry *yaffs_lookup(struc
+ d_add(dentry, inode);
+
+ return NULL;
+- /* return (ERR_PTR(-EIO)); */
+-
+ }
+
++
++#ifdef YAFFS_HAS_PUT_INODE
++
+ /* For now put inode is just for debugging
+ * Put inode is called when the inode **structure** is put.
+ */
+ static void yaffs_put_inode(struct inode *inode)
+ {
+ T(YAFFS_TRACE_OS,
+- ("yaffs_put_inode: ino %d, count %d\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count)));
++ ("yaffs_put_inode: ino %d, count %d\n", (int)inode->i_ino,
++ atomic_read(&inode->i_count)));
+
+ }
++#endif
+
+ /* clear is called to tell the fs to release any per-inode data it holds */
+ static void yaffs_clear_inode(struct inode *inode)
+@@ -449,9 +515,9 @@ static void yaffs_clear_inode(struct ino
+ obj = yaffs_InodeToObject(inode);
+
+ T(YAFFS_TRACE_OS,
+- ("yaffs_clear_inode: ino %d, count %d %s\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count),
+- obj ? "object exists" : "null object"));
++ ("yaffs_clear_inode: ino %d, count %d %s\n", (int)inode->i_ino,
++ atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object"));
+
+ if (obj) {
+ dev = obj->myDev;
+@@ -486,23 +552,23 @@ static void yaffs_delete_inode(struct in
+ yaffs_Device *dev;
+
+ T(YAFFS_TRACE_OS,
+- ("yaffs_delete_inode: ino %d, count %d %s\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count),
+- obj ? "object exists" : "null object"));
++ ("yaffs_delete_inode: ino %d, count %d %s\n", (int)inode->i_ino,
++ atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object"));
+
+ if (obj) {
+ dev = obj->myDev;
+ yaffs_GrossLock(dev);
+- yaffs_DeleteFile(obj);
++ yaffs_DeleteObject(obj);
+ yaffs_GrossUnlock(dev);
+ }
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
+- truncate_inode_pages (&inode->i_data, 0);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++ truncate_inode_pages(&inode->i_data, 0);
+ #endif
+ clear_inode(inode);
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_file_flush(struct file *file, fl_owner_t id)
+ #else
+ static int yaffs_file_flush(struct file *file)
+@@ -513,8 +579,8 @@ static int yaffs_file_flush(struct file
+ yaffs_Device *dev = obj->myDev;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_file_flush object %d (%s)\n", obj->objectId,
+- obj->dirty ? "dirty" : "clean"));
++ ("yaffs_file_flush object %d (%s)\n", obj->objectId,
++ obj->dirty ? "dirty" : "clean"));
+
+ yaffs_GrossLock(dev);
+
+@@ -535,15 +601,15 @@ static int yaffs_readpage_nolock(struct
+
+ yaffs_Device *dev;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage at %08x, size %08x\n",
+- (unsigned)(pg->index << PAGE_CACHE_SHIFT),
+- (unsigned)PAGE_CACHE_SIZE));
++ T(YAFFS_TRACE_OS, ("yaffs_readpage at %08x, size %08x\n",
++ (unsigned)(pg->index << PAGE_CACHE_SHIFT),
++ (unsigned)PAGE_CACHE_SIZE));
+
+ obj = yaffs_DentryToObject(f->f_dentry);
+
+ dev = obj->myDev;
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ BUG_ON(!PageLocked(pg));
+ #else
+ if (!PageLocked(pg))
+@@ -555,9 +621,9 @@ static int yaffs_readpage_nolock(struct
+
+ yaffs_GrossLock(dev);
+
+- ret =
+- yaffs_ReadDataFromFile(obj, pg_buf, pg->index << PAGE_CACHE_SHIFT,
+- PAGE_CACHE_SIZE);
++ ret = yaffs_ReadDataFromFile(obj, pg_buf,
++ pg->index << PAGE_CACHE_SHIFT,
++ PAGE_CACHE_SIZE);
+
+ yaffs_GrossUnlock(dev);
+
+@@ -575,7 +641,7 @@ static int yaffs_readpage_nolock(struct
+ flush_dcache_page(pg);
+ kunmap(pg);
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage done\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_readpage done\n"));
+ return ret;
+ }
+
+@@ -593,7 +659,7 @@ static int yaffs_readpage(struct file *f
+
+ /* writepage inspired by/stolen from smbfs */
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+ #else
+ static int yaffs_writepage(struct page *page)
+@@ -616,12 +682,11 @@ static int yaffs_writepage(struct page *
+
+ if (offset > inode->i_size) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG
+- "yaffs_writepage at %08x, inode size = %08x!!!\n",
+- (unsigned)(page->index << PAGE_CACHE_SHIFT),
+- (unsigned)inode->i_size));
++ ("yaffs_writepage at %08x, inode size = %08x!!!\n",
++ (unsigned)(page->index << PAGE_CACHE_SHIFT),
++ (unsigned)inode->i_size));
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG " -> don't care!!\n"));
++ (" -> don't care!!\n"));
+ unlock_page(page);
+ return 0;
+ }
+@@ -629,11 +694,10 @@ static int yaffs_writepage(struct page *
+ end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+
+ /* easy case */
+- if (page->index < end_index) {
++ if (page->index < end_index)
+ nBytes = PAGE_CACHE_SIZE;
+- } else {
++ else
+ nBytes = inode->i_size & (PAGE_CACHE_SIZE - 1);
+- }
+
+ get_page(page);
+
+@@ -643,19 +707,18 @@ static int yaffs_writepage(struct page *
+ yaffs_GrossLock(obj->myDev);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_writepage at %08x, size %08x\n",
+- (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes));
++ ("yaffs_writepage at %08x, size %08x\n",
++ (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes));
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "writepag0: obj = %05x, ino = %05x\n",
+- (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
++ ("writepag0: obj = %05x, ino = %05x\n",
++ (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
+
+- nWritten =
+- yaffs_WriteDataToFile(obj, buffer, page->index << PAGE_CACHE_SHIFT,
+- nBytes, 0);
++ nWritten = yaffs_WriteDataToFile(obj, buffer,
++ page->index << PAGE_CACHE_SHIFT, nBytes, 0);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "writepag1: obj = %05x, ino = %05x\n",
+- (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
++ ("writepag1: obj = %05x, ino = %05x\n",
++ (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
+
+ yaffs_GrossUnlock(obj->myDev);
+
+@@ -667,100 +730,207 @@ static int yaffs_writepage(struct page *
+ return (nWritten == nBytes) ? 0 : -ENOSPC;
+ }
+
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned flags,
++ struct page **pagep, void **fsdata)
++{
++ struct page *pg = NULL;
++ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
++ uint32_t offset = pos & (PAGE_CACHE_SIZE - 1);
++ uint32_t to = offset + len;
++
++ int ret = 0;
++ int space_held = 0;
++
++ T(YAFFS_TRACE_OS, ("start yaffs_write_begin\n"));
++ /* Get a page */
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28)
++ pg = grab_cache_page_write_begin(mapping, index, flags);
++#else
++ pg = __grab_cache_page(mapping, index);
++#endif
++
++ *pagep = pg;
++ if (!pg) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ /* Get fs space */
++ space_held = yaffs_hold_space(filp);
++
++ if (!space_held) {
++ ret = -ENOSPC;
++ goto out;
++ }
++
++ /* Update page if required */
++
++ if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
++ ret = yaffs_readpage_nolock(filp, pg);
++
++ if (ret)
++ goto out;
++
++ /* Happy path return */
++ T(YAFFS_TRACE_OS, ("end yaffs_write_begin - ok\n"));
++
++ return 0;
++
++out:
++ T(YAFFS_TRACE_OS, ("end yaffs_write_begin fail returning %d\n", ret));
++ if (space_held)
++ yaffs_release_space(filp);
++ if (pg) {
++ unlock_page(pg);
++ page_cache_release(pg);
++ }
++ return ret;
++}
++
++#else
++
+ static int yaffs_prepare_write(struct file *f, struct page *pg,
+- unsigned offset, unsigned to)
++ unsigned offset, unsigned to)
+ {
++ T(YAFFS_TRACE_OS, ("yaffs_prepair_write\n"));
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_prepair_write\n"));
+ if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
+ return yaffs_readpage_nolock(f, pg);
+-
+ return 0;
++}
++#endif
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned copied,
++ struct page *pg, void *fsdadata)
++{
++ int ret = 0;
++ void *addr, *kva;
++ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
++
++ kva = kmap(pg);
++ addr = kva + offset_into_page;
++
++ T(YAFFS_TRACE_OS,
++ ("yaffs_write_end addr %x pos %x nBytes %d\n",
++ (unsigned) addr,
++ (int)pos, copied));
++
++ ret = yaffs_file_write(filp, addr, copied, &pos);
++
++ if (ret != copied) {
++ T(YAFFS_TRACE_OS,
++ ("yaffs_write_end not same size ret %d copied %d\n",
++ ret, copied));
++ SetPageError(pg);
++ ClearPageUptodate(pg);
++ } else {
++ SetPageUptodate(pg);
++ }
++
++ kunmap(pg);
+
++ yaffs_release_space(filp);
++ unlock_page(pg);
++ page_cache_release(pg);
++ return ret;
+ }
++#else
+
+ static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+- unsigned to)
++ unsigned to)
+ {
++ void *addr, *kva;
+
+- void *addr = page_address(pg) + offset;
+ loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
+ int nBytes = to - offset;
+ int nWritten;
+
+ unsigned spos = pos;
+- unsigned saddr = (unsigned)addr;
++ unsigned saddr;
++
++ kva = kmap(pg);
++ addr = kva + offset;
++
++ saddr = (unsigned) addr;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_commit_write addr %x pos %x nBytes %d\n", saddr,
+- spos, nBytes));
++ ("yaffs_commit_write addr %x pos %x nBytes %d\n",
++ saddr, spos, nBytes));
+
+ nWritten = yaffs_file_write(f, addr, nBytes, &pos);
+
+ if (nWritten != nBytes) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG
+- "yaffs_commit_write not same size nWritten %d nBytes %d\n",
+- nWritten, nBytes));
++ ("yaffs_commit_write not same size nWritten %d nBytes %d\n",
++ nWritten, nBytes));
+ SetPageError(pg);
+ ClearPageUptodate(pg);
+ } else {
+ SetPageUptodate(pg);
+ }
+
++ kunmap(pg);
++
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_commit_write returning %d\n",
+- nWritten == nBytes ? 0 : nWritten));
++ ("yaffs_commit_write returning %d\n",
++ nWritten == nBytes ? 0 : nWritten));
+
+ return nWritten == nBytes ? 0 : nWritten;
+-
+ }
++#endif
++
+
+-static void yaffs_FillInodeFromObject(struct inode *inode, yaffs_Object * obj)
++static void yaffs_FillInodeFromObject(struct inode *inode, yaffs_Object *obj)
+ {
+ if (inode && obj) {
+
+
+ /* Check mode against the variant type and attempt to repair if broken. */
+- __u32 mode = obj->yst_mode;
+- switch( obj->variantType ){
+- case YAFFS_OBJECT_TYPE_FILE :
+- if( ! S_ISREG(mode) ){
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFREG;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK :
+- if( ! S_ISLNK(mode) ){
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFLNK;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY :
+- if( ! S_ISDIR(mode) ){
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFDIR;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_UNKNOWN :
+- case YAFFS_OBJECT_TYPE_HARDLINK :
+- case YAFFS_OBJECT_TYPE_SPECIAL :
+- default:
+- /* TODO? */
+- break;
+- }
++ __u32 mode = obj->yst_mode;
++ switch (obj->variantType) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (!S_ISREG(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFREG;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ if (!S_ISLNK(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFLNK;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ if (!S_ISDIR(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFDIR;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ default:
++ /* TODO? */
++ break;
++ }
++
++ inode->i_flags |= S_NOATIME;
+
+ inode->i_ino = obj->objectId;
+ inode->i_mode = obj->yst_mode;
+ inode->i_uid = obj->yst_uid;
+ inode->i_gid = obj->yst_gid;
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+ inode->i_blksize = inode->i_sb->s_blocksize;
+ #endif
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+ inode->i_rdev = old_decode_dev(obj->yst_rdev);
+ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+@@ -781,26 +951,25 @@ static void yaffs_FillInodeFromObject(st
+ inode->i_nlink = yaffs_GetObjectLinkCount(obj);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG
+- "yaffs_FillInode mode %x uid %d gid %d size %d count %d\n",
+- inode->i_mode, inode->i_uid, inode->i_gid,
+- (int)inode->i_size, atomic_read(&inode->i_count)));
++ ("yaffs_FillInode mode %x uid %d gid %d size %d count %d\n",
++ inode->i_mode, inode->i_uid, inode->i_gid,
++ (int)inode->i_size, atomic_read(&inode->i_count)));
+
+ switch (obj->yst_mode & S_IFMT) {
+ default: /* fifo, device or socket */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ init_special_inode(inode, obj->yst_mode,
+- old_decode_dev(obj->yst_rdev));
++ old_decode_dev(obj->yst_rdev));
+ #else
+ init_special_inode(inode, obj->yst_mode,
+- (dev_t) (obj->yst_rdev));
++ (dev_t) (obj->yst_rdev));
+ #endif
+ break;
+ case S_IFREG: /* file */
+ inode->i_op = &yaffs_file_inode_operations;
+ inode->i_fop = &yaffs_file_operations;
+ inode->i_mapping->a_ops =
+- &yaffs_file_address_operations;
++ &yaffs_file_address_operations;
+ break;
+ case S_IFDIR: /* directory */
+ inode->i_op = &yaffs_dir_inode_operations;
+@@ -817,34 +986,36 @@ static void yaffs_FillInodeFromObject(st
+
+ } else {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_FileInode invalid parameters\n"));
++ ("yaffs_FileInode invalid parameters\n"));
+ }
+
+ }
+
+ struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+- yaffs_Object * obj)
++ yaffs_Object *obj)
+ {
+ struct inode *inode;
+
+ if (!sb) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_get_inode for NULL super_block!!\n"));
++ ("yaffs_get_inode for NULL super_block!!\n"));
+ return NULL;
+
+ }
+
+ if (!obj) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_get_inode for NULL object!!\n"));
++ ("yaffs_get_inode for NULL object!!\n"));
+ return NULL;
+
+ }
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_get_inode for object %d\n", obj->objectId));
++ ("yaffs_get_inode for object %d\n", obj->objectId));
+
+- inode = iget(sb, obj->objectId);
++ inode = Y_IGET(sb, obj->objectId);
++ if (IS_ERR(inode))
++ return NULL;
+
+ /* NB Side effect: iget calls back to yaffs_read_inode(). */
+ /* iget also increments the inode's i_count */
+@@ -854,7 +1025,7 @@ struct inode *yaffs_get_inode(struct sup
+ }
+
+ static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+- loff_t * pos)
++ loff_t *pos)
+ {
+ yaffs_Object *obj;
+ int nWritten, ipos;
+@@ -869,28 +1040,26 @@ static ssize_t yaffs_file_write(struct f
+
+ inode = f->f_dentry->d_inode;
+
+- if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND) {
++ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+ ipos = inode->i_size;
+- } else {
++ else
+ ipos = *pos;
+- }
+
+- if (!obj) {
++ if (!obj)
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_file_write: hey obj is null!\n"));
+- } else {
++ ("yaffs_file_write: hey obj is null!\n"));
++ else
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG
+- "yaffs_file_write about to write writing %d bytes"
+- "to object %d at %d\n",
+- n, obj->objectId, ipos));
+- }
++ ("yaffs_file_write about to write writing %zu bytes"
++ "to object %d at %d\n",
++ n, obj->objectId, ipos));
+
+ nWritten = yaffs_WriteDataToFile(obj, buf, ipos, n, 0);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_file_write writing %d bytes, %d written at %d\n",
+- n, nWritten, ipos));
++ ("yaffs_file_write writing %zu bytes, %d written at %d\n",
++ n, nWritten, ipos));
++
+ if (nWritten > 0) {
+ ipos += nWritten;
+ *pos = ipos;
+@@ -899,10 +1068,9 @@ static ssize_t yaffs_file_write(struct f
+ inode->i_blocks = (ipos + 511) >> 9;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG
+- "yaffs_file_write size updated to %d bytes, "
+- "%d blocks\n",
+- ipos, (int)(inode->i_blocks)));
++ ("yaffs_file_write size updated to %d bytes, "
++ "%d blocks\n",
++ ipos, (int)(inode->i_blocks)));
+ }
+
+ }
+@@ -910,13 +1078,54 @@ static ssize_t yaffs_file_write(struct f
+ return nWritten == 0 ? -ENOSPC : nWritten;
+ }
+
++/* Space holding and freeing is done to ensure we have space available for write_begin/end */
++/* For now we just assume few parallel writes and check against a small number. */
++/* Todo: need to do this with a counter to handle parallel reads better */
++
++static ssize_t yaffs_hold_space(struct file *f)
++{
++ yaffs_Object *obj;
++ yaffs_Device *dev;
++
++ int nFreeChunks;
++
++
++ obj = yaffs_DentryToObject(f->f_dentry);
++
++ dev = obj->myDev;
++
++ yaffs_GrossLock(dev);
++
++ nFreeChunks = yaffs_GetNumberOfFreeChunks(dev);
++
++ yaffs_GrossUnlock(dev);
++
++ return (nFreeChunks > 20) ? 1 : 0;
++}
++
++static void yaffs_release_space(struct file *f)
++{
++ yaffs_Object *obj;
++ yaffs_Device *dev;
++
++
++ obj = yaffs_DentryToObject(f->f_dentry);
++
++ dev = obj->myDev;
++
++ yaffs_GrossLock(dev);
++
++
++ yaffs_GrossUnlock(dev);
++}
++
+ static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+ {
+ yaffs_Object *obj;
+ yaffs_Device *dev;
+ struct inode *inode = f->f_dentry->d_inode;
+ unsigned long offset, curoffs;
+- struct list_head *i;
++ struct ylist_head *i;
+ yaffs_Object *l;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+@@ -932,24 +1141,20 @@ static int yaffs_readdir(struct file *f,
+
+ if (offset == 0) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_readdir: entry . ino %d \n",
+- (int)inode->i_ino));
+- if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR)
+- < 0) {
++ ("yaffs_readdir: entry . ino %d \n",
++ (int)inode->i_ino));
++ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0)
+ goto out;
+- }
+ offset++;
+ f->f_pos++;
+ }
+ if (offset == 1) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_readdir: entry .. ino %d \n",
+- (int)f->f_dentry->d_parent->d_inode->i_ino));
+- if (filldir
+- (dirent, "..", 2, offset,
+- f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) {
++ ("yaffs_readdir: entry .. ino %d \n",
++ (int)f->f_dentry->d_parent->d_inode->i_ino));
++ if (filldir(dirent, "..", 2, offset,
++ f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0)
+ goto out;
+- }
+ offset++;
+ f->f_pos++;
+ }
+@@ -965,35 +1170,32 @@ static int yaffs_readdir(struct file *f,
+ f->f_version = inode->i_version;
+ }
+
+- list_for_each(i, &obj->variant.directoryVariant.children) {
++ ylist_for_each(i, &obj->variant.directoryVariant.children) {
+ curoffs++;
+ if (curoffs >= offset) {
+- l = list_entry(i, yaffs_Object, siblings);
++ l = ylist_entry(i, yaffs_Object, siblings);
+
+ yaffs_GetObjectName(l, name,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_readdir: %s inode %d\n", name,
++ ("yaffs_readdir: %s inode %d\n", name,
+ yaffs_GetObjectInode(l)));
+
+ if (filldir(dirent,
+- name,
+- strlen(name),
+- offset,
+- yaffs_GetObjectInode(l),
+- yaffs_GetObjectType(l))
+- < 0) {
++ name,
++ strlen(name),
++ offset,
++ yaffs_GetObjectInode(l),
++ yaffs_GetObjectType(l)) < 0)
+ goto up_and_out;
+- }
+
+ offset++;
+ f->f_pos++;
+ }
+ }
+
+- up_and_out:
+- out:
+-
++up_and_out:
++out:
+ yaffs_GrossUnlock(dev);
+
+ return 0;
+@@ -1002,12 +1204,19 @@ static int yaffs_readdir(struct file *f,
+ /*
+ * File creation. Allocate an inode, and we're done..
+ */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++#define YCRED(x) x
++#else
++#define YCRED(x) (x->cred)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- dev_t rdev)
++ dev_t rdev)
+ #else
+ static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- int rdev)
++ int rdev)
+ #endif
+ {
+ struct inode *inode;
+@@ -1018,25 +1227,25 @@ static int yaffs_mknod(struct inode *dir
+ yaffs_Object *parent = yaffs_InodeToObject(dir);
+
+ int error = -ENOSPC;
+- uid_t uid = current->fsuid;
+- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
++ uid_t uid = YCRED(current)->fsuid;
++ gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+
+- if((dir->i_mode & S_ISGID) && S_ISDIR(mode))
++ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+ mode |= S_ISGID;
+
+ if (parent) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_mknod: parent object %d type %d\n",
+- parent->objectId, parent->variantType));
++ ("yaffs_mknod: parent object %d type %d\n",
++ parent->objectId, parent->variantType));
+ } else {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_mknod: could not get parent object\n"));
++ ("yaffs_mknod: could not get parent object\n"));
+ return -EPERM;
+ }
+
+ T(YAFFS_TRACE_OS, ("yaffs_mknod: making oject for %s, "
+- "mode %x dev %x\n",
+- dentry->d_name.name, mode, rdev));
++ "mode %x dev %x\n",
++ dentry->d_name.name, mode, rdev));
+
+ dev = parent->myDev;
+
+@@ -1045,33 +1254,28 @@ static int yaffs_mknod(struct inode *dir
+ switch (mode & S_IFMT) {
+ default:
+ /* Special (socket, fifo, device...) */
+- T(YAFFS_TRACE_OS, (KERN_DEBUG
+- "yaffs_mknod: making special\n"));
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+- obj =
+- yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
+- gid, old_encode_dev(rdev));
+-#else
+- obj =
+- yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
+- gid, rdev);
++ T(YAFFS_TRACE_OS, ("yaffs_mknod: making special\n"));
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
++ gid, old_encode_dev(rdev));
++#else
++ obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
++ gid, rdev);
+ #endif
+ break;
+ case S_IFREG: /* file */
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mknod: making file\n"));
+- obj =
+- yaffs_MknodFile(parent, dentry->d_name.name, mode, uid,
+- gid);
++ T(YAFFS_TRACE_OS, ("yaffs_mknod: making file\n"));
++ obj = yaffs_MknodFile(parent, dentry->d_name.name, mode, uid,
++ gid);
+ break;
+ case S_IFDIR: /* directory */
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_mknod: making directory\n"));
+- obj =
+- yaffs_MknodDirectory(parent, dentry->d_name.name, mode,
+- uid, gid);
++ ("yaffs_mknod: making directory\n"));
++ obj = yaffs_MknodDirectory(parent, dentry->d_name.name, mode,
++ uid, gid);
+ break;
+ case S_IFLNK: /* symlink */
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mknod: making file\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_mknod: making symlink\n"));
+ obj = NULL; /* Do we ever get here? */
+ break;
+ }
+@@ -1083,12 +1287,12 @@ static int yaffs_mknod(struct inode *dir
+ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+ d_instantiate(dentry, inode);
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_mknod created object %d count = %d\n",
+- obj->objectId, atomic_read(&inode->i_count)));
++ ("yaffs_mknod created object %d count = %d\n",
++ obj->objectId, atomic_read(&inode->i_count)));
+ error = 0;
+ } else {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_mknod failed making object\n"));
++ ("yaffs_mknod failed making object\n"));
+ error = -ENOMEM;
+ }
+
+@@ -1098,25 +1302,19 @@ static int yaffs_mknod(struct inode *dir
+ static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+ {
+ int retVal;
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mkdir\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_mkdir\n"));
+ retVal = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+-#if 0
+- /* attempt to fix dir bug - didn't work */
+- if (!retVal) {
+- dget(dentry);
+- }
+-#endif
+ return retVal;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n)
+ #else
+ static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
+ #endif
+ {
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_create\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_create\n"));
+ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+ }
+
+@@ -1127,8 +1325,8 @@ static int yaffs_unlink(struct inode *di
+ yaffs_Device *dev;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_unlink %d:%s\n", (int)(dir->i_ino),
+- dentry->d_name.name));
++ ("yaffs_unlink %d:%s\n", (int)(dir->i_ino),
++ dentry->d_name.name));
+
+ dev = yaffs_InodeToObject(dir)->myDev;
+
+@@ -1151,82 +1349,74 @@ static int yaffs_unlink(struct inode *di
+ * Create a link...
+ */
+ static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+- struct dentry *dentry)
++ struct dentry *dentry)
+ {
+ struct inode *inode = old_dentry->d_inode;
+ yaffs_Object *obj = NULL;
+ yaffs_Object *link = NULL;
+ yaffs_Device *dev;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_link\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_link\n"));
+
+ obj = yaffs_InodeToObject(inode);
+ dev = obj->myDev;
+
+ yaffs_GrossLock(dev);
+
+- if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
+- {
+- link =
+- yaffs_Link(yaffs_InodeToObject(dir), dentry->d_name.name,
+- obj);
+- }
++ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
++ link = yaffs_Link(yaffs_InodeToObject(dir), dentry->d_name.name,
++ obj);
+
+ if (link) {
+ old_dentry->d_inode->i_nlink = yaffs_GetObjectLinkCount(obj);
+ d_instantiate(dentry, old_dentry->d_inode);
+ atomic_inc(&old_dentry->d_inode->i_count);
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_link link count %d i_count %d\n",
+- old_dentry->d_inode->i_nlink,
+- atomic_read(&old_dentry->d_inode->i_count)));
+-
++ ("yaffs_link link count %d i_count %d\n",
++ old_dentry->d_inode->i_nlink,
++ atomic_read(&old_dentry->d_inode->i_count)));
+ }
+
+ yaffs_GrossUnlock(dev);
+
+- if (link) {
+-
++ if (link)
+ return 0;
+- }
+
+ return -EPERM;
+ }
+
+ static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+- const char *symname)
++ const char *symname)
+ {
+ yaffs_Object *obj;
+ yaffs_Device *dev;
+- uid_t uid = current->fsuid;
+- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
++ uid_t uid = YCRED(current)->fsuid;
++ gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_symlink\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_symlink\n"));
+
+ dev = yaffs_InodeToObject(dir)->myDev;
+ yaffs_GrossLock(dev);
+ obj = yaffs_MknodSymLink(yaffs_InodeToObject(dir), dentry->d_name.name,
+- S_IFLNK | S_IRWXUGO, uid, gid, symname);
++ S_IFLNK | S_IRWXUGO, uid, gid, symname);
+ yaffs_GrossUnlock(dev);
+
+ if (obj) {
+-
+ struct inode *inode;
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+ d_instantiate(dentry, inode);
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "symlink created OK\n"));
++ T(YAFFS_TRACE_OS, ("symlink created OK\n"));
+ return 0;
+ } else {
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "symlink not created\n"));
+-
++ T(YAFFS_TRACE_OS, ("symlink not created\n"));
+ }
+
+ return -ENOMEM;
+ }
+
+ static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+- int datasync)
++ int datasync)
+ {
+
+ yaffs_Object *obj;
+@@ -1236,7 +1426,7 @@ static int yaffs_sync_object(struct file
+
+ dev = obj->myDev;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_sync_object\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_sync_object\n"));
+ yaffs_GrossLock(dev);
+ yaffs_FlushFile(obj, 1);
+ yaffs_GrossUnlock(dev);
+@@ -1255,41 +1445,36 @@ static int yaffs_rename(struct inode *ol
+ int retVal = YAFFS_FAIL;
+ yaffs_Object *target;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_rename\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_rename\n"));
+ dev = yaffs_InodeToObject(old_dir)->myDev;
+
+ yaffs_GrossLock(dev);
+
+ /* Check if the target is an existing directory that is not empty. */
+- target =
+- yaffs_FindObjectByName(yaffs_InodeToObject(new_dir),
+- new_dentry->d_name.name);
++ target = yaffs_FindObjectByName(yaffs_InodeToObject(new_dir),
++ new_dentry->d_name.name);
+
+
+
+- if (target &&
+- target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
+- !list_empty(&target->variant.directoryVariant.children)) {
++ if (target && target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ !ylist_empty(&target->variant.directoryVariant.children)) {
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "target is non-empty dir\n"));
++ T(YAFFS_TRACE_OS, ("target is non-empty dir\n"));
+
+ retVal = YAFFS_FAIL;
+ } else {
+-
+ /* Now does unlinking internally using shadowing mechanism */
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "calling yaffs_RenameObject\n"));
+-
+- retVal =
+- yaffs_RenameObject(yaffs_InodeToObject(old_dir),
+- old_dentry->d_name.name,
+- yaffs_InodeToObject(new_dir),
+- new_dentry->d_name.name);
++ T(YAFFS_TRACE_OS, ("calling yaffs_RenameObject\n"));
+
++ retVal = yaffs_RenameObject(yaffs_InodeToObject(old_dir),
++ old_dentry->d_name.name,
++ yaffs_InodeToObject(new_dir),
++ new_dentry->d_name.name);
+ }
+ yaffs_GrossUnlock(dev);
+
+ if (retVal == YAFFS_OK) {
+- if(target) {
++ if (target) {
+ new_dentry->d_inode->i_nlink--;
+ mark_inode_dirty(new_dentry->d_inode);
+ }
+@@ -1298,7 +1483,6 @@ static int yaffs_rename(struct inode *ol
+ } else {
+ return -ENOTEMPTY;
+ }
+-
+ }
+
+ static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+@@ -1308,15 +1492,15 @@ static int yaffs_setattr(struct dentry *
+ yaffs_Device *dev;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_setattr of object %d\n",
+- yaffs_InodeToObject(inode)->objectId));
+-
+- if ((error = inode_change_ok(inode, attr)) == 0) {
++ ("yaffs_setattr of object %d\n",
++ yaffs_InodeToObject(inode)->objectId));
+
++ error = inode_change_ok(inode, attr);
++ if (error == 0) {
+ dev = yaffs_InodeToObject(inode)->myDev;
+ yaffs_GrossLock(dev);
+ if (yaffs_SetAttributes(yaffs_InodeToObject(inode), attr) ==
+- YAFFS_OK) {
++ YAFFS_OK) {
+ error = 0;
+ } else {
+ error = -EPERM;
+@@ -1328,12 +1512,12 @@ static int yaffs_setattr(struct dentry *
+ return error;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ {
+ yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
+ struct super_block *sb = dentry->d_sb;
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
+ {
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+@@ -1343,32 +1527,53 @@ static int yaffs_statfs(struct super_blo
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+ #endif
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_statfs\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_statfs\n"));
+
+ yaffs_GrossLock(dev);
+
+ buf->f_type = YAFFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = 255;
+- if (sb->s_blocksize > dev->nDataBytesPerChunk) {
++
++ if (dev->nDataBytesPerChunk & (dev->nDataBytesPerChunk - 1)) {
++ /* Do this if chunk size is not a power of 2 */
++
++ uint64_t bytesInDev;
++ uint64_t bytesFree;
++
++ bytesInDev = ((uint64_t)((dev->endBlock - dev->startBlock + 1))) *
++ ((uint64_t)(dev->nChunksPerBlock * dev->nDataBytesPerChunk));
++
++ do_div(bytesInDev, sb->s_blocksize); /* bytesInDev becomes the number of blocks */
++ buf->f_blocks = bytesInDev;
++
++ bytesFree = ((uint64_t)(yaffs_GetNumberOfFreeChunks(dev))) *
++ ((uint64_t)(dev->nDataBytesPerChunk));
++
++ do_div(bytesFree, sb->s_blocksize);
++
++ buf->f_bfree = bytesFree;
++
++ } else if (sb->s_blocksize > dev->nDataBytesPerChunk) {
+
+ buf->f_blocks =
+- (dev->endBlock - dev->startBlock +
+- 1) * dev->nChunksPerBlock / (sb->s_blocksize /
+- dev->nDataBytesPerChunk);
++ (dev->endBlock - dev->startBlock + 1) *
++ dev->nChunksPerBlock /
++ (sb->s_blocksize / dev->nDataBytesPerChunk);
+ buf->f_bfree =
+- yaffs_GetNumberOfFreeChunks(dev) / (sb->s_blocksize /
+- dev->nDataBytesPerChunk);
++ yaffs_GetNumberOfFreeChunks(dev) /
++ (sb->s_blocksize / dev->nDataBytesPerChunk);
+ } else {
+-
+ buf->f_blocks =
+- (dev->endBlock - dev->startBlock +
+- 1) * dev->nChunksPerBlock * (dev->nDataBytesPerChunk /
+- sb->s_blocksize);
++ (dev->endBlock - dev->startBlock + 1) *
++ dev->nChunksPerBlock *
++ (dev->nDataBytesPerChunk / sb->s_blocksize);
++
+ buf->f_bfree =
+- yaffs_GetNumberOfFreeChunks(dev) * (dev->nDataBytesPerChunk /
+- sb->s_blocksize);
++ yaffs_GetNumberOfFreeChunks(dev) *
++ (dev->nDataBytesPerChunk / sb->s_blocksize);
+ }
++
+ buf->f_files = 0;
+ buf->f_ffree = 0;
+ buf->f_bavail = buf->f_bfree;
+@@ -1378,18 +1583,19 @@ static int yaffs_statfs(struct super_blo
+ }
+
+
+-/**
+ static int yaffs_do_sync_fs(struct super_block *sb)
+ {
+
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_do_sync_fs\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_do_sync_fs\n"));
+
+- if(sb->s_dirt) {
++ if (sb->s_dirt) {
+ yaffs_GrossLock(dev);
+
+- if(dev)
++ if (dev) {
++ yaffs_FlushEntireDeviceCache(dev);
+ yaffs_CheckpointSave(dev);
++ }
+
+ yaffs_GrossUnlock(dev);
+
+@@ -1397,35 +1603,73 @@ static int yaffs_do_sync_fs(struct super
+ }
+ return 0;
+ }
+-**/
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static void yaffs_write_super(struct super_block *sb)
+ #else
+ static int yaffs_write_super(struct super_block *sb)
+ #endif
+ {
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_write_super\n"));
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+- return 0; /* yaffs_do_sync_fs(sb);*/
++ T(YAFFS_TRACE_OS, ("yaffs_write_super\n"));
++ if (yaffs_auto_checkpoint >= 2)
++ yaffs_do_sync_fs(sb);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
++ return 0;
+ #endif
+ }
+
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_sync_fs(struct super_block *sb, int wait)
+ #else
+ static int yaffs_sync_fs(struct super_block *sb)
+ #endif
+ {
++ T(YAFFS_TRACE_OS, ("yaffs_sync_fs\n"));
++
++ if (yaffs_auto_checkpoint >= 1)
++ yaffs_do_sync_fs(sb);
++
++ return 0;
++}
++
++#ifdef YAFFS_USE_OWN_IGET
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
++{
++ struct inode *inode;
++ yaffs_Object *obj;
++ yaffs_Device *dev = yaffs_SuperToDevice(sb);
++
++ T(YAFFS_TRACE_OS,
++ ("yaffs_iget for %lu\n", ino));
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_sync_fs\n"));
++ inode = iget_locked(sb, ino);
++ if (!inode)
++ return ERR_PTR(-ENOMEM);
++ if (!(inode->i_state & I_NEW))
++ return inode;
++
++ /* NB This is called as a side effect of other functions, but
++ * we had to release the lock to prevent deadlocks, so
++ * need to lock again.
++ */
+
+- return 0; /* yaffs_do_sync_fs(sb);*/
++ yaffs_GrossLock(dev);
+
++ obj = yaffs_FindObjectByNumber(dev, inode->i_ino);
++
++ yaffs_FillInodeFromObject(inode, obj);
++
++ yaffs_GrossUnlock(dev);
++
++ unlock_new_inode(inode);
++ return inode;
+ }
+
++#else
+
+ static void yaffs_read_inode(struct inode *inode)
+ {
+@@ -1438,7 +1682,7 @@ static void yaffs_read_inode(struct inod
+ yaffs_Device *dev = yaffs_SuperToDevice(inode->i_sb);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_read_inode for %d\n", (int)inode->i_ino));
++ ("yaffs_read_inode for %d\n", (int)inode->i_ino));
+
+ yaffs_GrossLock(dev);
+
+@@ -1449,18 +1693,20 @@ static void yaffs_read_inode(struct inod
+ yaffs_GrossUnlock(dev);
+ }
+
+-static LIST_HEAD(yaffs_dev_list);
++#endif
++
++static YLIST_HEAD(yaffs_dev_list);
+
+-#if 0 // not used
++#if 0 /* not used */
+ static int yaffs_remount_fs(struct super_block *sb, int *flags, char *data)
+ {
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+
+- if( *flags & MS_RDONLY ) {
++ if (*flags & MS_RDONLY) {
+ struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_remount_fs: %s: RO\n", dev->name ));
++ ("yaffs_remount_fs: %s: RO\n", dev->name));
+
+ yaffs_GrossLock(dev);
+
+@@ -1472,10 +1718,9 @@ static int yaffs_remount_fs(struct super
+ mtd->sync(mtd);
+
+ yaffs_GrossUnlock(dev);
+- }
+- else {
++ } else {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_remount_fs: %s: RW\n", dev->name ));
++ ("yaffs_remount_fs: %s: RW\n", dev->name));
+ }
+
+ return 0;
+@@ -1486,7 +1731,7 @@ static void yaffs_put_super(struct super
+ {
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_put_super\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_put_super\n"));
+
+ yaffs_GrossLock(dev);
+
+@@ -1494,18 +1739,17 @@ static void yaffs_put_super(struct super
+
+ yaffs_CheckpointSave(dev);
+
+- if (dev->putSuperFunc) {
++ if (dev->putSuperFunc)
+ dev->putSuperFunc(sb);
+- }
+
+ yaffs_Deinitialise(dev);
+
+ yaffs_GrossUnlock(dev);
+
+ /* we assume this is protected by lock_kernel() in mount/umount */
+- list_del(&dev->devList);
++ ylist_del(&dev->devList);
+
+- if(dev->spareBuffer){
++ if (dev->spareBuffer) {
+ YFREE(dev->spareBuffer);
+ dev->spareBuffer = NULL;
+ }
+@@ -1516,12 +1760,10 @@ static void yaffs_put_super(struct super
+
+ static void yaffs_MTDPutSuper(struct super_block *sb)
+ {
+-
+ struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
+
+- if (mtd->sync) {
++ if (mtd->sync)
+ mtd->sync(mtd);
+- }
+
+ put_mtd_device(mtd);
+ }
+@@ -1531,9 +1773,9 @@ static void yaffs_MarkSuperBlockDirty(vo
+ {
+ struct super_block *sb = (struct super_block *)vsb;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_MarkSuperBlockDirty() sb = %p\n",sb));
+-// if(sb)
+-// sb->s_dirt = 1;
++ T(YAFFS_TRACE_OS, ("yaffs_MarkSuperBlockDirty() sb = %p\n", sb));
++ if (sb)
++ sb->s_dirt = 1;
+ }
+
+ typedef struct {
+@@ -1546,48 +1788,48 @@ typedef struct {
+ #define MAX_OPT_LEN 20
+ static int yaffs_parse_options(yaffs_options *options, const char *options_str)
+ {
+- char cur_opt[MAX_OPT_LEN+1];
++ char cur_opt[MAX_OPT_LEN + 1];
+ int p;
+ int error = 0;
+
+ /* Parse through the options which is a comma seperated list */
+
+- while(options_str && *options_str && !error){
+- memset(cur_opt,0,MAX_OPT_LEN+1);
++ while (options_str && *options_str && !error) {
++ memset(cur_opt, 0, MAX_OPT_LEN + 1);
+ p = 0;
+
+- while(*options_str && *options_str != ','){
+- if(p < MAX_OPT_LEN){
++ while (*options_str && *options_str != ',') {
++ if (p < MAX_OPT_LEN) {
+ cur_opt[p] = *options_str;
+ p++;
+ }
+ options_str++;
+ }
+
+- if(!strcmp(cur_opt,"inband-tags"))
++ if (!strcmp(cur_opt, "inband-tags"))
+ options->inband_tags = 1;
+- else if(!strcmp(cur_opt,"no-cache"))
++ else if (!strcmp(cur_opt, "no-cache"))
+ options->no_cache = 1;
+- else if(!strcmp(cur_opt,"no-checkpoint-read"))
++ else if (!strcmp(cur_opt, "no-checkpoint-read"))
+ options->skip_checkpoint_read = 1;
+- else if(!strcmp(cur_opt,"no-checkpoint-write"))
++ else if (!strcmp(cur_opt, "no-checkpoint-write"))
+ options->skip_checkpoint_write = 1;
+- else if(!strcmp(cur_opt,"no-checkpoint")){
++ else if (!strcmp(cur_opt, "no-checkpoint")) {
+ options->skip_checkpoint_read = 1;
+ options->skip_checkpoint_write = 1;
+ } else {
+- printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",cur_opt);
++ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
++ cur_opt);
+ error = 1;
+ }
+-
+ }
+
+ return error;
+ }
+
+ static struct super_block *yaffs_internal_read_super(int yaffsVersion,
+- struct super_block *sb,
+- void *data, int silent)
++ struct super_block *sb,
++ void *data, int silent)
+ {
+ int nBlocks;
+ struct inode *inode = NULL;
+@@ -1602,6 +1844,7 @@ static struct super_block *yaffs_interna
+
+ sb->s_magic = YAFFS_MAGIC;
+ sb->s_op = &yaffs_super_ops;
++ sb->s_flags |= MS_NOATIME;
+
+ if (!sb)
+ printk(KERN_INFO "yaffs: sb is NULL\n");
+@@ -1614,14 +1857,14 @@ static struct super_block *yaffs_interna
+ sb->s_dev,
+ yaffs_devname(sb, devname_buf));
+
+- if(!data_str)
++ if (!data_str)
+ data_str = "";
+
+- printk(KERN_INFO "yaffs: passed flags \"%s\"\n",data_str);
++ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+
+- memset(&options,0,sizeof(options));
++ memset(&options, 0, sizeof(options));
+
+- if(yaffs_parse_options(&options,data_str)){
++ if (yaffs_parse_options(&options, data_str)) {
+ /* Option parsing failed */
+ return NULL;
+ }
+@@ -1645,9 +1888,9 @@ static struct super_block *yaffs_interna
+ yaffs_devname(sb, devname_buf)));
+
+ /* Check it's an mtd device..... */
+- if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR) {
++ if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
+ return NULL; /* This isn't an mtd device */
+- }
++
+ /* Get the device */
+ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+ if (!mtd) {
+@@ -1673,29 +1916,23 @@ static struct super_block *yaffs_interna
+ T(YAFFS_TRACE_OS, (" %s %d\n", WRITE_SIZE_STR, WRITE_SIZE(mtd)));
+ T(YAFFS_TRACE_OS, (" oobsize %d\n", mtd->oobsize));
+ T(YAFFS_TRACE_OS, (" erasesize %d\n", mtd->erasesize));
+- T(YAFFS_TRACE_OS, (" size %d\n", mtd->size));
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++ T(YAFFS_TRACE_OS, (" size %u\n", mtd->size));
++#else
++ T(YAFFS_TRACE_OS, (" size %lld\n", mtd->size));
++#endif
+
+ #ifdef CONFIG_YAFFS_AUTO_YAFFS2
+
+- if (yaffsVersion == 1 &&
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- mtd->writesize >= 2048) {
+-#else
+- mtd->oobblock >= 2048) {
+-#endif
+- T(YAFFS_TRACE_ALWAYS,("yaffs: auto selecting yaffs2\n"));
+- yaffsVersion = 2;
++ if (yaffsVersion == 1 && WRITE_SIZE(mtd) >= 2048) {
++ T(YAFFS_TRACE_ALWAYS, ("yaffs: auto selecting yaffs2\n"));
++ yaffsVersion = 2;
+ }
+
+ /* Added NCB 26/5/2006 for completeness */
+- if (yaffsVersion == 2 &&
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- mtd->writesize == 512) {
+-#else
+- mtd->oobblock == 512) {
+-#endif
+- T(YAFFS_TRACE_ALWAYS,("yaffs: auto selecting yaffs1\n"));
+- yaffsVersion = 1;
++ if (yaffsVersion == 2 && !options.inband_tags && WRITE_SIZE(mtd) == 512) {
++ T(YAFFS_TRACE_ALWAYS, ("yaffs: auto selecting yaffs1\n"));
++ yaffsVersion = 1;
+ }
+
+ #endif
+@@ -1707,7 +1944,7 @@ static struct super_block *yaffs_interna
+ !mtd->block_markbad ||
+ !mtd->read ||
+ !mtd->write ||
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ !mtd->read_oob || !mtd->write_oob) {
+ #else
+ !mtd->write_ecc ||
+@@ -1719,12 +1956,9 @@ static struct super_block *yaffs_interna
+ return NULL;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- if (mtd->writesize < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+-#else
+- if (mtd->oobblock < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+-#endif
+- mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) {
++ if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
++ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
++ !options.inband_tags) {
+ T(YAFFS_TRACE_ALWAYS,
+ ("yaffs: MTD device does not have the "
+ "right page sizes\n"));
+@@ -1735,7 +1969,7 @@ static struct super_block *yaffs_interna
+ if (!mtd->erase ||
+ !mtd->read ||
+ !mtd->write ||
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ !mtd->read_oob || !mtd->write_oob) {
+ #else
+ !mtd->write_ecc ||
+@@ -1761,7 +1995,7 @@ static struct super_block *yaffs_interna
+ * Set the yaffs_Device up for mtd
+ */
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ sb->s_fs_info = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
+ #else
+ sb->u.generic_sbp = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
+@@ -1780,13 +2014,15 @@ static struct super_block *yaffs_interna
+
+ /* Set up the memory size parameters.... */
+
+- nBlocks = mtd->size / (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK);
++ nBlocks = YCALCBLOCKS(mtd->size, (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK));
++
+ dev->startBlock = 0;
+ dev->endBlock = nBlocks - 1;
+ dev->nChunksPerBlock = YAFFS_CHUNKS_PER_BLOCK;
+- dev->nDataBytesPerChunk = YAFFS_BYTES_PER_CHUNK;
++ dev->totalBytesPerChunk = YAFFS_BYTES_PER_CHUNK;
+ dev->nReservedBlocks = 5;
+ dev->nShortOpCaches = (options.no_cache) ? 0 : 10;
++ dev->inbandTags = options.inband_tags;
+
+ /* ... and the functions. */
+ if (yaffsVersion == 2) {
+@@ -1798,20 +2034,19 @@ static struct super_block *yaffs_interna
+ dev->queryNANDBlock = nandmtd2_QueryNANDBlock;
+ dev->spareBuffer = YMALLOC(mtd->oobsize);
+ dev->isYaffs2 = 1;
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- dev->nDataBytesPerChunk = mtd->writesize;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ dev->totalBytesPerChunk = mtd->writesize;
+ dev->nChunksPerBlock = mtd->erasesize / mtd->writesize;
+ #else
+- dev->nDataBytesPerChunk = mtd->oobblock;
++ dev->totalBytesPerChunk = mtd->oobblock;
+ dev->nChunksPerBlock = mtd->erasesize / mtd->oobblock;
+ #endif
+- nBlocks = mtd->size / mtd->erasesize;
++ nBlocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+
+- dev->nCheckpointReservedBlocks = CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS;
+ dev->startBlock = 0;
+ dev->endBlock = nBlocks - 1;
+ } else {
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ /* use the MTD interface in yaffs_mtdif1.c */
+ dev->writeChunkWithTagsToNAND =
+ nandmtd1_WriteChunkWithTagsToNAND;
+@@ -1847,7 +2082,7 @@ static struct super_block *yaffs_interna
+ dev->skipCheckpointWrite = options.skip_checkpoint_write;
+
+ /* we assume this is protected by lock_kernel() in mount/umount */
+- list_add_tail(&dev->devList, &yaffs_dev_list);
++ ylist_add_tail(&dev->devList, &yaffs_dev_list);
+
+ init_MUTEX(&dev->grossLock);
+
+@@ -1884,20 +2119,23 @@ static struct super_block *yaffs_interna
+ return NULL;
+ }
+ sb->s_root = root;
++ sb->s_dirt = !dev->isCheckpointed;
++ T(YAFFS_TRACE_ALWAYS,
++ ("yaffs_read_super: isCheckpointed %d\n", dev->isCheckpointed));
+
+ T(YAFFS_TRACE_OS, ("yaffs_read_super: done\n"));
+ return sb;
+ }
+
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+ {
+ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
+@@ -1938,14 +2176,14 @@ static DECLARE_FSTYPE(yaffs_fs_type, "ya
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+ {
+ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+@@ -1990,12 +2228,12 @@ static char *yaffs_dump_dev(char *buf, y
+ {
+ buf += sprintf(buf, "startBlock......... %d\n", dev->startBlock);
+ buf += sprintf(buf, "endBlock........... %d\n", dev->endBlock);
++ buf += sprintf(buf, "totalBytesPerChunk. %d\n", dev->totalBytesPerChunk);
+ buf += sprintf(buf, "nDataBytesPerChunk. %d\n", dev->nDataBytesPerChunk);
+ buf += sprintf(buf, "chunkGroupBits..... %d\n", dev->chunkGroupBits);
+ buf += sprintf(buf, "chunkGroupSize..... %d\n", dev->chunkGroupSize);
+ buf += sprintf(buf, "nErasedBlocks...... %d\n", dev->nErasedBlocks);
+ buf += sprintf(buf, "nReservedBlocks.... %d\n", dev->nReservedBlocks);
+- buf += sprintf(buf, "nCheckptResBlocks.. %d\n", dev->nCheckpointReservedBlocks);
+ buf += sprintf(buf, "blocksInCheckpoint. %d\n", dev->blocksInCheckpoint);
+ buf += sprintf(buf, "nTnodesCreated..... %d\n", dev->nTnodesCreated);
+ buf += sprintf(buf, "nFreeTnodes........ %d\n", dev->nFreeTnodes);
+@@ -2006,10 +2244,8 @@ static char *yaffs_dump_dev(char *buf, y
+ buf += sprintf(buf, "nPageReads......... %d\n", dev->nPageReads);
+ buf += sprintf(buf, "nBlockErasures..... %d\n", dev->nBlockErasures);
+ buf += sprintf(buf, "nGCCopies.......... %d\n", dev->nGCCopies);
+- buf +=
+- sprintf(buf, "garbageCollections. %d\n", dev->garbageCollections);
+- buf +=
+- sprintf(buf, "passiveGCs......... %d\n",
++ buf += sprintf(buf, "garbageCollections. %d\n", dev->garbageCollections);
++ buf += sprintf(buf, "passiveGCs......... %d\n",
+ dev->passiveGarbageCollections);
+ buf += sprintf(buf, "nRetriedWrites..... %d\n", dev->nRetriedWrites);
+ buf += sprintf(buf, "nShortOpCaches..... %d\n", dev->nShortOpCaches);
+@@ -2025,6 +2261,7 @@ static char *yaffs_dump_dev(char *buf, y
+ sprintf(buf, "nBackgroudDeletions %d\n", dev->nBackgroundDeletions);
+ buf += sprintf(buf, "useNANDECC......... %d\n", dev->useNANDECC);
+ buf += sprintf(buf, "isYaffs2........... %d\n", dev->isYaffs2);
++ buf += sprintf(buf, "inbandTags......... %d\n", dev->inbandTags);
+
+ return buf;
+ }
+@@ -2033,7 +2270,7 @@ static int yaffs_proc_read(char *page,
+ char **start,
+ off_t offset, int count, int *eof, void *data)
+ {
+- struct list_head *item;
++ struct ylist_head *item;
+ char *buf = page;
+ int step = offset;
+ int n = 0;
+@@ -2057,8 +2294,8 @@ static int yaffs_proc_read(char *page,
+ lock_kernel();
+
+ /* Locate and print the Nth entry. Order N-squared but N is small. */
+- list_for_each(item, &yaffs_dev_list) {
+- yaffs_Device *dev = list_entry(item, yaffs_Device, devList);
++ ylist_for_each(item, &yaffs_dev_list) {
++ yaffs_Device *dev = ylist_entry(item, yaffs_Device, devList);
+ if (n < step) {
+ n++;
+ continue;
+@@ -2119,7 +2356,7 @@ static int yaffs_proc_write(struct file
+ char *end;
+ char *mask_name;
+ const char *x;
+- char substring[MAX_MASK_NAME_LENGTH+1];
++ char substring[MAX_MASK_NAME_LENGTH + 1];
+ int i;
+ int done = 0;
+ int add, len = 0;
+@@ -2129,9 +2366,8 @@ static int yaffs_proc_write(struct file
+
+ while (!done && (pos < count)) {
+ done = 1;
+- while ((pos < count) && isspace(buf[pos])) {
++ while ((pos < count) && isspace(buf[pos]))
+ pos++;
+- }
+
+ switch (buf[pos]) {
+ case '+':
+@@ -2148,20 +2384,21 @@ static int yaffs_proc_write(struct file
+ mask_name = NULL;
+
+ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
++
+ if (end > buf + pos) {
+ mask_name = "numeral";
+ len = end - (buf + pos);
+ pos += len;
+ done = 0;
+ } else {
+- for(x = buf + pos, i = 0;
+- (*x == '_' || (*x >='a' && *x <= 'z')) &&
+- i <MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+- substring[i] = *x;
++ for (x = buf + pos, i = 0;
++ (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
++ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
++ substring[i] = *x;
+ substring[i] = '\0';
+
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+- if(strcmp(substring,mask_flags[i].mask_name) == 0){
++ if (strcmp(substring, mask_flags[i].mask_name) == 0) {
+ mask_name = mask_flags[i].mask_name;
+ mask_bitfield = mask_flags[i].mask_bitfield;
+ done = 0;
+@@ -2172,7 +2409,7 @@ static int yaffs_proc_write(struct file
+
+ if (mask_name != NULL) {
+ done = 0;
+- switch(add) {
++ switch (add) {
+ case '-':
+ rg &= ~mask_bitfield;
+ break;
+@@ -2191,13 +2428,13 @@ static int yaffs_proc_write(struct file
+
+ yaffs_traceMask = rg | YAFFS_TRACE_ALWAYS;
+
+- printk("new trace = 0x%08X\n",yaffs_traceMask);
++ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_traceMask);
+
+ if (rg & YAFFS_TRACE_ALWAYS) {
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ char flag;
+ flag = ((rg & mask_flags[i].mask_bitfield) == mask_flags[i].mask_bitfield) ? '+' : '-';
+- printk("%c%s\n", flag, mask_flags[i].mask_name);
++ printk(KERN_DEBUG "%c%s\n", flag, mask_flags[i].mask_name);
+ }
+ }
+
+@@ -2211,12 +2448,8 @@ struct file_system_to_install {
+ };
+
+ static struct file_system_to_install fs_to_install[] = {
+-//#ifdef CONFIG_YAFFS_YAFFS1
+ {&yaffs_fs_type, 0},
+-//#endif
+-//#ifdef CONFIG_YAFFS_YAFFS2
+ {&yaffs2_fs_type, 0},
+-//#endif
+ {NULL, 0}
+ };
+
+@@ -2231,15 +2464,14 @@ static int __init init_yaffs_fs(void)
+ /* Install the proc_fs entry */
+ my_proc_entry = create_proc_entry("yaffs",
+ S_IRUGO | S_IFREG,
+- &proc_root);
++ YPROC_ROOT);
+
+ if (my_proc_entry) {
+ my_proc_entry->write_proc = yaffs_proc_write;
+ my_proc_entry->read_proc = yaffs_proc_read;
+ my_proc_entry->data = NULL;
+- } else {
++ } else
+ return -ENOMEM;
+- }
+
+ /* Now add the file system entries */
+
+@@ -2247,9 +2479,8 @@ static int __init init_yaffs_fs(void)
+
+ while (fsinst->fst && !error) {
+ error = register_filesystem(fsinst->fst);
+- if (!error) {
++ if (!error)
+ fsinst->installed = 1;
+- }
+ fsinst++;
+ }
+
+@@ -2277,7 +2508,7 @@ static void __exit exit_yaffs_fs(void)
+ T(YAFFS_TRACE_ALWAYS, ("yaffs " __DATE__ " " __TIME__
+ " removing. \n"));
+
+- remove_proc_entry("yaffs", &proc_root);
++ remove_proc_entry("yaffs", YPROC_ROOT);
+
+ fsinst = fs_to_install;
+
+@@ -2288,7 +2519,6 @@ static void __exit exit_yaffs_fs(void)
+ }
+ fsinst++;
+ }
+-
+ }
+
+ module_init(init_yaffs_fs)
+--- /dev/null
++++ b/fs/yaffs2/yaffs_getblockinfo.h
+@@ -0,0 +1,34 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_GETBLOCKINFO_H__
++#define __YAFFS_GETBLOCKINFO_H__
++
++#include "yaffs_guts.h"
++
++/* Function to manipulate block info */
++static Y_INLINE yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blk)
++{
++ if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("**>> yaffs: getBlockInfo block %d is not valid" TENDSTR),
++ blk));
++ YBUG();
++ }
++ return &dev->blockInfo[blk - dev->internalStartBlock];
++}
++
++#endif
+--- a/fs/yaffs2/yaffs_guts.c
++++ b/fs/yaffs2/yaffs_guts.c
+@@ -12,16 +12,17 @@
+ */
+
+ const char *yaffs_guts_c_version =
+- "$Id: yaffs_guts.c,v 1.49 2007-05-15 20:07:40 charles Exp $";
++ "$Id: yaffs_guts.c,v 1.82 2009-03-09 04:24:17 charles Exp $";
+
+ #include "yportenv.h"
+
+ #include "yaffsinterface.h"
+ #include "yaffs_guts.h"
+ #include "yaffs_tagsvalidity.h"
++#include "yaffs_getblockinfo.h"
+
+ #include "yaffs_tagscompat.h"
+-#ifndef CONFIG_YAFFS_USE_OWN_SORT
++#ifndef CONFIG_YAFFS_USE_OWN_SORT
+ #include "yaffs_qsort.h"
+ #endif
+ #include "yaffs_nand.h"
+@@ -32,116 +33,116 @@ const char *yaffs_guts_c_version =
+ #include "yaffs_packedtags2.h"
+
+
+-#ifdef CONFIG_YAFFS_WINCE
+-void yfsd_LockYAFFS(BOOL fsLockOnly);
+-void yfsd_UnlockYAFFS(BOOL fsLockOnly);
+-#endif
+-
+ #define YAFFS_PASSIVE_GC_CHUNKS 2
+
+ #include "yaffs_ecc.h"
+
+
+ /* Robustification (if it ever comes about...) */
+-static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND);
+-static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk);
+-static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags * tags);
+-static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+- const yaffs_ExtendedTags * tags);
++static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND);
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND,
++ int erasedOk);
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *tags);
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++ const yaffs_ExtendedTags *tags);
+
+ /* Other local prototypes */
+-static int yaffs_UnlinkObject( yaffs_Object *obj);
++static int yaffs_UnlinkObject(yaffs_Object *obj);
+ static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj);
+
+ static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList);
+
+-static int yaffs_WriteNewChunkWithTagsToNAND(yaffs_Device * dev,
+- const __u8 * buffer,
+- yaffs_ExtendedTags * tags,
+- int useReserve);
+-static int yaffs_PutChunkIntoFile(yaffs_Object * in, int chunkInInode,
+- int chunkInNAND, int inScan);
+-
+-static yaffs_Object *yaffs_CreateNewObject(yaffs_Device * dev, int number,
+- yaffs_ObjectType type);
+-static void yaffs_AddObjectToDirectory(yaffs_Object * directory,
+- yaffs_Object * obj);
+-static int yaffs_UpdateObjectHeader(yaffs_Object * in, const YCHAR * name,
+- int force, int isShrink, int shadows);
+-static void yaffs_RemoveObjectFromDirectory(yaffs_Object * obj);
++static int yaffs_WriteNewChunkWithTagsToNAND(yaffs_Device *dev,
++ const __u8 *buffer,
++ yaffs_ExtendedTags *tags,
++ int useReserve);
++static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode,
++ int chunkInNAND, int inScan);
++
++static yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number,
++ yaffs_ObjectType type);
++static void yaffs_AddObjectToDirectory(yaffs_Object *directory,
++ yaffs_Object *obj);
++static int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name,
++ int force, int isShrink, int shadows);
++static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj);
+ static int yaffs_CheckStructures(void);
+-static int yaffs_DeleteWorker(yaffs_Object * in, yaffs_Tnode * tn, __u32 level,
+- int chunkOffset, int *limit);
+-static int yaffs_DoGenericObjectDeletion(yaffs_Object * in);
+-
+-static yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blockNo);
+-
+-static __u8 *yaffs_GetTempBuffer(yaffs_Device * dev, int lineNo);
+-static void yaffs_ReleaseTempBuffer(yaffs_Device * dev, __u8 * buffer,
+- int lineNo);
++static int yaffs_DeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, __u32 level,
++ int chunkOffset, int *limit);
++static int yaffs_DoGenericObjectDeletion(yaffs_Object *in);
++
++static yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device *dev, int blockNo);
+
+-static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND);
+
+-static int yaffs_UnlinkWorker(yaffs_Object * obj);
+-static void yaffs_DestroyObject(yaffs_Object * obj);
++static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
++ int chunkInNAND);
+
+-static int yaffs_TagsMatch(const yaffs_ExtendedTags * tags, int objectId,
+- int chunkInObject);
++static int yaffs_UnlinkWorker(yaffs_Object *obj);
+
+-loff_t yaffs_GetFileSize(yaffs_Object * obj);
++static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId,
++ int chunkInObject);
+
+-static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr);
++static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve,
++ yaffs_BlockInfo **blockUsedPtr);
+
+-static void yaffs_VerifyFreeChunks(yaffs_Device * dev);
++static void yaffs_VerifyFreeChunks(yaffs_Device *dev);
+
+ static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in);
+
++static void yaffs_VerifyDirectory(yaffs_Object *directory);
+ #ifdef YAFFS_PARANOID
+-static int yaffs_CheckFileSanity(yaffs_Object * in);
++static int yaffs_CheckFileSanity(yaffs_Object *in);
+ #else
+ #define yaffs_CheckFileSanity(in)
+ #endif
+
+-static void yaffs_InvalidateWholeChunkCache(yaffs_Object * in);
+-static void yaffs_InvalidateChunkCache(yaffs_Object * object, int chunkId);
++static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in);
++static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId);
+
+ static void yaffs_InvalidateCheckpoint(yaffs_Device *dev);
+
+-static int yaffs_FindChunkInFile(yaffs_Object * in, int chunkInInode,
+- yaffs_ExtendedTags * tags);
++static int yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode,
++ yaffs_ExtendedTags *tags);
+
+-static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos);
+-static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device * dev,
+- yaffs_FileStructure * fStruct,
+- __u32 chunkId);
++static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn,
++ unsigned pos);
++static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev,
++ yaffs_FileStructure *fStruct,
++ __u32 chunkId);
+
+
+ /* Function to calculate chunk and offset */
+
+-static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, __u32 *chunk, __u32 *offset)
++static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, int *chunkOut,
++ __u32 *offsetOut)
+ {
+- if(dev->chunkShift){
+- /* Easy-peasy power of 2 case */
+- *chunk = (__u32)(addr >> dev->chunkShift);
+- *offset = (__u32)(addr & dev->chunkMask);
+- }
+- else if(dev->crumbsPerChunk)
+- {
+- /* Case where we're using "crumbs" */
+- *offset = (__u32)(addr & dev->crumbMask);
+- addr >>= dev->crumbShift;
+- *chunk = ((__u32)addr)/dev->crumbsPerChunk;
+- *offset += ((addr - (*chunk * dev->crumbsPerChunk)) << dev->crumbShift);
++ int chunk;
++ __u32 offset;
++
++ chunk = (__u32)(addr >> dev->chunkShift);
++
++ if (dev->chunkDiv == 1) {
++ /* easy power of 2 case */
++ offset = (__u32)(addr & dev->chunkMask);
++ } else {
++ /* Non power-of-2 case */
++
++ loff_t chunkBase;
++
++ chunk /= dev->chunkDiv;
++
++ chunkBase = ((loff_t)chunk) * dev->nDataBytesPerChunk;
++ offset = (__u32)(addr - chunkBase);
+ }
+- else
+- YBUG();
++
++ *chunkOut = chunk;
++ *offsetOut = offset;
+ }
+
+-/* Function to return the number of shifts for a power of 2 greater than or equal
+- * to the given number
++/* Function to return the number of shifts for a power of 2 greater than or
++ * equal to the given number
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+@@ -153,13 +154,14 @@ static __u32 ShiftsGE(__u32 x)
+
+ nShifts = extraBits = 0;
+
+- while(x>1){
+- if(x & 1) extraBits++;
+- x>>=1;
++ while (x > 1) {
++ if (x & 1)
++ extraBits++;
++ x >>= 1;
+ nShifts++;
+ }
+
+- if(extraBits)
++ if (extraBits)
+ nShifts++;
+
+ return nShifts;
+@@ -168,16 +170,17 @@ static __u32 ShiftsGE(__u32 x)
+ /* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+-static __u32 ShiftDiv(__u32 x)
++static __u32 Shifts(__u32 x)
+ {
+ int nShifts;
+
+ nShifts = 0;
+
+- if(!x) return 0;
++ if (!x)
++ return 0;
+
+- while( !(x&1)){
+- x>>=1;
++ while (!(x&1)) {
++ x >>= 1;
+ nShifts++;
+ }
+
+@@ -195,21 +198,25 @@ static int yaffs_InitialiseTempBuffers(y
+ int i;
+ __u8 *buf = (__u8 *)1;
+
+- memset(dev->tempBuffer,0,sizeof(dev->tempBuffer));
++ memset(dev->tempBuffer, 0, sizeof(dev->tempBuffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+ dev->tempBuffer[i].line = 0; /* not in use */
+ dev->tempBuffer[i].buffer = buf =
+- YMALLOC_DMA(dev->nDataBytesPerChunk);
++ YMALLOC_DMA(dev->totalBytesPerChunk);
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+-
+ }
+
+-static __u8 *yaffs_GetTempBuffer(yaffs_Device * dev, int lineNo)
++__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo)
+ {
+ int i, j;
++
++ dev->tempInUse++;
++ if (dev->tempInUse > dev->maxTemp)
++ dev->maxTemp = dev->tempInUse;
++
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->tempBuffer[i].line == 0) {
+ dev->tempBuffer[i].line = lineNo;
+@@ -227,9 +234,9 @@ static __u8 *yaffs_GetTempBuffer(yaffs_D
+ T(YAFFS_TRACE_BUFFERS,
+ (TSTR("Out of temp buffers at line %d, other held by lines:"),
+ lineNo));
+- for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ T(YAFFS_TRACE_BUFFERS, (TSTR(" %d "), dev->tempBuffer[i].line));
+- }
++
+ T(YAFFS_TRACE_BUFFERS, (TSTR(" " TENDSTR)));
+
+ /*
+@@ -242,10 +249,13 @@ static __u8 *yaffs_GetTempBuffer(yaffs_D
+
+ }
+
+-static void yaffs_ReleaseTempBuffer(yaffs_Device * dev, __u8 * buffer,
++void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer,
+ int lineNo)
+ {
+ int i;
++
++ dev->tempInUse--;
++
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->tempBuffer[i].buffer == buffer) {
+ dev->tempBuffer[i].line = 0;
+@@ -267,27 +277,26 @@ static void yaffs_ReleaseTempBuffer(yaff
+ /*
+ * Determine if we have a managed buffer.
+ */
+-int yaffs_IsManagedTempBuffer(yaffs_Device * dev, const __u8 * buffer)
++int yaffs_IsManagedTempBuffer(yaffs_Device *dev, const __u8 *buffer)
+ {
+ int i;
++
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->tempBuffer[i].buffer == buffer)
+ return 1;
++ }
+
++ for (i = 0; i < dev->nShortOpCaches; i++) {
++ if (dev->srCache[i].data == buffer)
++ return 1;
+ }
+
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if( dev->srCache[i].data == buffer )
+- return 1;
+-
+- }
+-
+- if (buffer == dev->checkpointBuffer)
+- return 1;
+-
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("yaffs: unmaged buffer detected.\n" TENDSTR)));
+- return 0;
++ if (buffer == dev->checkpointBuffer)
++ return 1;
++
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: unmaged buffer detected.\n" TENDSTR)));
++ return 0;
+ }
+
+
+@@ -296,62 +305,63 @@ int yaffs_IsManagedTempBuffer(yaffs_Devi
+ * Chunk bitmap manipulations
+ */
+
+-static Y_INLINE __u8 *yaffs_BlockBits(yaffs_Device * dev, int blk)
++static Y_INLINE __u8 *yaffs_BlockBits(yaffs_Device *dev, int blk)
+ {
+ if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
+ T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR),
+- blk));
++ (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR),
++ blk));
+ YBUG();
+ }
+ return dev->chunkBits +
+- (dev->chunkBitmapStride * (blk - dev->internalStartBlock));
++ (dev->chunkBitmapStride * (blk - dev->internalStartBlock));
+ }
+
+ static Y_INLINE void yaffs_VerifyChunkBitId(yaffs_Device *dev, int blk, int chunk)
+ {
+- if(blk < dev->internalStartBlock || blk > dev->internalEndBlock ||
+- chunk < 0 || chunk >= dev->nChunksPerBlock) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),blk,chunk));
+- YBUG();
++ if (blk < dev->internalStartBlock || blk > dev->internalEndBlock ||
++ chunk < 0 || chunk >= dev->nChunksPerBlock) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),
++ blk, chunk));
++ YBUG();
+ }
+ }
+
+-static Y_INLINE void yaffs_ClearChunkBits(yaffs_Device * dev, int blk)
++static Y_INLINE void yaffs_ClearChunkBits(yaffs_Device *dev, int blk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+
+ memset(blkBits, 0, dev->chunkBitmapStride);
+ }
+
+-static Y_INLINE void yaffs_ClearChunkBit(yaffs_Device * dev, int blk, int chunk)
++static Y_INLINE void yaffs_ClearChunkBit(yaffs_Device *dev, int blk, int chunk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+
+- yaffs_VerifyChunkBitId(dev,blk,chunk);
++ yaffs_VerifyChunkBitId(dev, blk, chunk);
+
+ blkBits[chunk / 8] &= ~(1 << (chunk & 7));
+ }
+
+-static Y_INLINE void yaffs_SetChunkBit(yaffs_Device * dev, int blk, int chunk)
++static Y_INLINE void yaffs_SetChunkBit(yaffs_Device *dev, int blk, int chunk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+
+- yaffs_VerifyChunkBitId(dev,blk,chunk);
++ yaffs_VerifyChunkBitId(dev, blk, chunk);
+
+ blkBits[chunk / 8] |= (1 << (chunk & 7));
+ }
+
+-static Y_INLINE int yaffs_CheckChunkBit(yaffs_Device * dev, int blk, int chunk)
++static Y_INLINE int yaffs_CheckChunkBit(yaffs_Device *dev, int blk, int chunk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+- yaffs_VerifyChunkBitId(dev,blk,chunk);
++ yaffs_VerifyChunkBitId(dev, blk, chunk);
+
+ return (blkBits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+ }
+
+-static Y_INLINE int yaffs_StillSomeChunkBits(yaffs_Device * dev, int blk)
++static Y_INLINE int yaffs_StillSomeChunkBits(yaffs_Device *dev, int blk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+ int i;
+@@ -363,17 +373,17 @@ static Y_INLINE int yaffs_StillSomeChunk
+ return 0;
+ }
+
+-static int yaffs_CountChunkBits(yaffs_Device * dev, int blk)
++static int yaffs_CountChunkBits(yaffs_Device *dev, int blk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+ int i;
+ int n = 0;
+ for (i = 0; i < dev->chunkBitmapStride; i++) {
+ __u8 x = *blkBits;
+- while(x){
+- if(x & 1)
++ while (x) {
++ if (x & 1)
+ n++;
+- x >>=1;
++ x >>= 1;
+ }
+
+ blkBits++;
+@@ -400,7 +410,7 @@ static int yaffs_SkipNANDVerification(ya
+ return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_NAND));
+ }
+
+-static const char * blockStateName[] = {
++static const char *blockStateName[] = {
+ "Unknown",
+ "Needs scanning",
+ "Scanning",
+@@ -413,64 +423,65 @@ static const char * blockStateName[] = {
+ "Dead"
+ };
+
+-static void yaffs_VerifyBlock(yaffs_Device *dev,yaffs_BlockInfo *bi,int n)
++static void yaffs_VerifyBlock(yaffs_Device *dev, yaffs_BlockInfo *bi, int n)
+ {
+ int actuallyUsed;
+ int inUse;
+
+- if(yaffs_SkipVerification(dev))
++ if (yaffs_SkipVerification(dev))
+ return;
+
+ /* Report illegal runtime states */
+- if(bi->blockState <0 || bi->blockState >= YAFFS_NUMBER_OF_BLOCK_STATES)
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has undefined state %d"TENDSTR),n,bi->blockState));
++ if (bi->blockState >= YAFFS_NUMBER_OF_BLOCK_STATES)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has undefined state %d"TENDSTR), n, bi->blockState));
+
+- switch(bi->blockState){
+- case YAFFS_BLOCK_STATE_UNKNOWN:
+- case YAFFS_BLOCK_STATE_SCANNING:
+- case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has bad run-state %s"TENDSTR),
+- n,blockStateName[bi->blockState]));
++ switch (bi->blockState) {
++ case YAFFS_BLOCK_STATE_UNKNOWN:
++ case YAFFS_BLOCK_STATE_SCANNING:
++ case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has bad run-state %s"TENDSTR),
++ n, blockStateName[bi->blockState]));
+ }
+
+ /* Check pages in use and soft deletions are legal */
+
+ actuallyUsed = bi->pagesInUse - bi->softDeletions;
+
+- if(bi->pagesInUse < 0 || bi->pagesInUse > dev->nChunksPerBlock ||
++ if (bi->pagesInUse < 0 || bi->pagesInUse > dev->nChunksPerBlock ||
+ bi->softDeletions < 0 || bi->softDeletions > dev->nChunksPerBlock ||
+ actuallyUsed < 0 || actuallyUsed > dev->nChunksPerBlock)
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has illegal values pagesInUsed %d softDeletions %d"TENDSTR),
+- n,bi->pagesInUse,bi->softDeletions));
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has illegal values pagesInUsed %d softDeletions %d"TENDSTR),
++ n, bi->pagesInUse, bi->softDeletions));
+
+
+ /* Check chunk bitmap legal */
+- inUse = yaffs_CountChunkBits(dev,n);
+- if(inUse != bi->pagesInUse)
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has inconsistent values pagesInUse %d counted chunk bits %d"TENDSTR),
+- n,bi->pagesInUse,inUse));
++ inUse = yaffs_CountChunkBits(dev, n);
++ if (inUse != bi->pagesInUse)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has inconsistent values pagesInUse %d counted chunk bits %d"TENDSTR),
++ n, bi->pagesInUse, inUse));
+
+ /* Check that the sequence number is valid.
+ * Ten million is legal, but is very unlikely
+ */
+- if(dev->isYaffs2 &&
++ if (dev->isYaffs2 &&
+ (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING || bi->blockState == YAFFS_BLOCK_STATE_FULL) &&
+- (bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000 ))
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has suspect sequence number of %d"TENDSTR),
+- n,bi->sequenceNumber));
+-
++ (bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000))
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has suspect sequence number of %d"TENDSTR),
++ n, bi->sequenceNumber));
+ }
+
+-static void yaffs_VerifyCollectedBlock(yaffs_Device *dev,yaffs_BlockInfo *bi,int n)
++static void yaffs_VerifyCollectedBlock(yaffs_Device *dev, yaffs_BlockInfo *bi,
++ int n)
+ {
+- yaffs_VerifyBlock(dev,bi,n);
++ yaffs_VerifyBlock(dev, bi, n);
+
+ /* After collection the block should be in the erased state */
+- /* TODO: This will need to change if we do partial gc */
++ /* This will need to change if we do partial gc */
+
+- if(bi->blockState != YAFFS_BLOCK_STATE_EMPTY){
+- T(YAFFS_TRACE_ERROR,(TSTR("Block %d is in state %d after gc, should be erased"TENDSTR),
+- n,bi->blockState));
++ if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING &&
++ bi->blockState != YAFFS_BLOCK_STATE_EMPTY) {
++ T(YAFFS_TRACE_ERROR, (TSTR("Block %d is in state %d after gc, should be erased"TENDSTR),
++ n, bi->blockState));
+ }
+ }
+
+@@ -480,52 +491,49 @@ static void yaffs_VerifyBlocks(yaffs_Dev
+ int nBlocksPerState[YAFFS_NUMBER_OF_BLOCK_STATES];
+ int nIllegalBlockStates = 0;
+
+-
+- if(yaffs_SkipVerification(dev))
++ if (yaffs_SkipVerification(dev))
+ return;
+
+- memset(nBlocksPerState,0,sizeof(nBlocksPerState));
+-
++ memset(nBlocksPerState, 0, sizeof(nBlocksPerState));
+
+- for(i = dev->internalStartBlock; i <= dev->internalEndBlock; i++){
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
+- yaffs_VerifyBlock(dev,bi,i);
++ for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
++ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
++ yaffs_VerifyBlock(dev, bi, i);
+
+- if(bi->blockState >=0 && bi->blockState < YAFFS_NUMBER_OF_BLOCK_STATES)
++ if (bi->blockState < YAFFS_NUMBER_OF_BLOCK_STATES)
+ nBlocksPerState[bi->blockState]++;
+ else
+ nIllegalBlockStates++;
+-
+ }
+
+- T(YAFFS_TRACE_VERIFY,(TSTR(""TENDSTR)));
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block summary"TENDSTR)));
++ T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block summary"TENDSTR)));
+
+- T(YAFFS_TRACE_VERIFY,(TSTR("%d blocks have illegal states"TENDSTR),nIllegalBlockStates));
+- if(nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+- T(YAFFS_TRACE_VERIFY,(TSTR("Too many allocating blocks"TENDSTR)));
++ T(YAFFS_TRACE_VERIFY, (TSTR("%d blocks have illegal states"TENDSTR), nIllegalBlockStates));
++ if (nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Too many allocating blocks"TENDSTR)));
+
+- for(i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
++ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("%s %d blocks"TENDSTR),
+- blockStateName[i],nBlocksPerState[i]));
++ blockStateName[i], nBlocksPerState[i]));
+
+- if(dev->blocksInCheckpoint != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])
++ if (dev->blocksInCheckpoint != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Checkpoint block count wrong dev %d count %d"TENDSTR),
+ dev->blocksInCheckpoint, nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT]));
+
+- if(dev->nErasedBlocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])
++ if (dev->nErasedBlocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Erased block count wrong dev %d count %d"TENDSTR),
+ dev->nErasedBlocks, nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY]));
+
+- if(nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1)
++ if (nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Too many collecting blocks %d (max is 1)"TENDSTR),
+ nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING]));
+
+- T(YAFFS_TRACE_VERIFY,(TSTR(""TENDSTR)));
++ T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
+
+ }
+
+@@ -535,26 +543,26 @@ static void yaffs_VerifyBlocks(yaffs_Dev
+ */
+ static void yaffs_VerifyObjectHeader(yaffs_Object *obj, yaffs_ObjectHeader *oh, yaffs_ExtendedTags *tags, int parentCheck)
+ {
+- if(yaffs_SkipVerification(obj->myDev))
++ if (obj && yaffs_SkipVerification(obj->myDev))
+ return;
+
+- if(!(tags && obj && oh)){
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Verifying object header tags %x obj %x oh %x"TENDSTR),
+- (__u32)tags,(__u32)obj,(__u32)oh));
++ if (!(tags && obj && oh)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Verifying object header tags %x obj %x oh %x"TENDSTR),
++ (__u32)tags, (__u32)obj, (__u32)oh));
+ return;
+ }
+
+- if(oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+- oh->type > YAFFS_OBJECT_TYPE_MAX)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR),
+- tags->objectId, oh->type));
+-
+- if(tags->objectId != obj->objectId)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch objectId %d"TENDSTR),
+- tags->objectId, obj->objectId));
++ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
++ oh->type > YAFFS_OBJECT_TYPE_MAX)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR),
++ tags->objectId, oh->type));
++
++ if (tags->objectId != obj->objectId)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch objectId %d"TENDSTR),
++ tags->objectId, obj->objectId));
+
+
+ /*
+@@ -563,46 +571,43 @@ static void yaffs_VerifyObjectHeader(yaf
+ * Tests do not apply to the root object.
+ */
+
+- if(parentCheck && tags->objectId > 1 && !obj->parent)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch parentId %d obj->parent is NULL"TENDSTR),
+- tags->objectId, oh->parentObjectId));
+-
+-
+- if(parentCheck && obj->parent &&
+- oh->parentObjectId != obj->parent->objectId &&
+- (oh->parentObjectId != YAFFS_OBJECTID_UNLINKED ||
+- obj->parent->objectId != YAFFS_OBJECTID_DELETED))
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch parentId %d parentObjectId %d"TENDSTR),
+- tags->objectId, oh->parentObjectId, obj->parent->objectId));
++ if (parentCheck && tags->objectId > 1 && !obj->parent)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch parentId %d obj->parent is NULL"TENDSTR),
++ tags->objectId, oh->parentObjectId));
+
++ if (parentCheck && obj->parent &&
++ oh->parentObjectId != obj->parent->objectId &&
++ (oh->parentObjectId != YAFFS_OBJECTID_UNLINKED ||
++ obj->parent->objectId != YAFFS_OBJECTID_DELETED))
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch parentId %d parentObjectId %d"TENDSTR),
++ tags->objectId, oh->parentObjectId, obj->parent->objectId));
+
+- if(tags->objectId > 1 && oh->name[0] == 0) /* Null name */
++ if (tags->objectId > 1 && oh->name[0] == 0) /* Null name */
+ T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header name is NULL"TENDSTR),
+- obj->objectId));
++ (TSTR("Obj %d header name is NULL"TENDSTR),
++ obj->objectId));
+
+- if(tags->objectId > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */
++ if (tags->objectId > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */
+ T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header name is 0xFF"TENDSTR),
+- obj->objectId));
++ (TSTR("Obj %d header name is 0xFF"TENDSTR),
++ obj->objectId));
+ }
+
+
+
+-static int yaffs_VerifyTnodeWorker(yaffs_Object * obj, yaffs_Tnode * tn,
+- __u32 level, int chunkOffset)
++static int yaffs_VerifyTnodeWorker(yaffs_Object *obj, yaffs_Tnode *tn,
++ __u32 level, int chunkOffset)
+ {
+ int i;
+ yaffs_Device *dev = obj->myDev;
+ int ok = 1;
+- int nTnodeBytes = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
+ if (tn) {
+ if (level > 0) {
+
+- for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++){
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+ if (tn->internal[i]) {
+ ok = yaffs_VerifyTnodeWorker(obj,
+ tn->internal[i],
+@@ -611,20 +616,19 @@ static int yaffs_VerifyTnodeWorker(yaffs
+ }
+ }
+ } else if (level == 0) {
+- int i;
+ yaffs_ExtendedTags tags;
+ __u32 objectId = obj->objectId;
+
+ chunkOffset <<= YAFFS_TNODES_LEVEL0_BITS;
+
+- for(i = 0; i < YAFFS_NTNODES_LEVEL0; i++){
+- __u32 theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
++ for (i = 0; i < YAFFS_NTNODES_LEVEL0; i++) {
++ __u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+
+- if(theChunk > 0){
++ if (theChunk > 0) {
+ /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),tags.objectId,tags.chunkId,theChunk)); */
+- yaffs_ReadChunkWithTagsFromNAND(dev,theChunk,NULL, &tags);
+- if(tags.objectId != objectId || tags.chunkId != chunkOffset){
+- T(~0,(TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++ yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags);
++ if (tags.objectId != objectId || tags.chunkId != chunkOffset) {
++ T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
+ objectId, chunkOffset, theChunk,
+ tags.objectId, tags.chunkId));
+ }
+@@ -646,13 +650,15 @@ static void yaffs_VerifyFile(yaffs_Objec
+ __u32 lastChunk;
+ __u32 x;
+ __u32 i;
+- int ok;
+ yaffs_Device *dev;
+ yaffs_ExtendedTags tags;
+ yaffs_Tnode *tn;
+ __u32 objectId;
+
+- if(obj && yaffs_SkipVerification(obj->myDev))
++ if (!obj)
++ return;
++
++ if (yaffs_SkipVerification(obj->myDev))
+ return;
+
+ dev = obj->myDev;
+@@ -662,17 +668,17 @@ static void yaffs_VerifyFile(yaffs_Objec
+ lastChunk = obj->variant.fileVariant.fileSize / dev->nDataBytesPerChunk + 1;
+ x = lastChunk >> YAFFS_TNODES_LEVEL0_BITS;
+ requiredTallness = 0;
+- while (x> 0) {
++ while (x > 0) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ requiredTallness++;
+ }
+
+ actualTallness = obj->variant.fileVariant.topLevel;
+
+- if(requiredTallness > actualTallness )
++ if (requiredTallness > actualTallness)
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d had tnode tallness %d, needs to be %d"TENDSTR),
+- obj->objectId,actualTallness, requiredTallness));
++ obj->objectId, actualTallness, requiredTallness));
+
+
+ /* Check that the chunks in the tnode tree are all correct.
+@@ -680,39 +686,31 @@ static void yaffs_VerifyFile(yaffs_Objec
+ * checking the tags for every chunk match.
+ */
+
+- if(yaffs_SkipNANDVerification(dev))
++ if (yaffs_SkipNANDVerification(dev))
+ return;
+
+- for(i = 1; i <= lastChunk; i++){
+- tn = yaffs_FindLevel0Tnode(dev, &obj->variant.fileVariant,i);
++ for (i = 1; i <= lastChunk; i++) {
++ tn = yaffs_FindLevel0Tnode(dev, &obj->variant.fileVariant, i);
+
+ if (tn) {
+- __u32 theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
+- if(theChunk > 0){
++ __u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
++ if (theChunk > 0) {
+ /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),objectId,i,theChunk)); */
+- yaffs_ReadChunkWithTagsFromNAND(dev,theChunk,NULL, &tags);
+- if(tags.objectId != objectId || tags.chunkId != i){
+- T(~0,(TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++ yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags);
++ if (tags.objectId != objectId || tags.chunkId != i) {
++ T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
+ objectId, i, theChunk,
+ tags.objectId, tags.chunkId));
+ }
+ }
+ }
+-
+ }
+-
+ }
+
+-static void yaffs_VerifyDirectory(yaffs_Object *obj)
+-{
+- if(obj && yaffs_SkipVerification(obj->myDev))
+- return;
+-
+-}
+
+ static void yaffs_VerifyHardLink(yaffs_Object *obj)
+ {
+- if(obj && yaffs_SkipVerification(obj->myDev))
++ if (obj && yaffs_SkipVerification(obj->myDev))
+ return;
+
+ /* Verify sane equivalent object */
+@@ -720,7 +718,7 @@ static void yaffs_VerifyHardLink(yaffs_O
+
+ static void yaffs_VerifySymlink(yaffs_Object *obj)
+ {
+- if(obj && yaffs_SkipVerification(obj->myDev))
++ if (obj && yaffs_SkipVerification(obj->myDev))
+ return;
+
+ /* Verify symlink string */
+@@ -728,7 +726,7 @@ static void yaffs_VerifySymlink(yaffs_Ob
+
+ static void yaffs_VerifySpecial(yaffs_Object *obj)
+ {
+- if(obj && yaffs_SkipVerification(obj->myDev))
++ if (obj && yaffs_SkipVerification(obj->myDev))
+ return;
+ }
+
+@@ -740,14 +738,19 @@ static void yaffs_VerifyObject(yaffs_Obj
+ __u32 chunkMax;
+
+ __u32 chunkIdOk;
+- __u32 chunkIsLive;
++ __u32 chunkInRange;
++ __u32 chunkShouldNotBeDeleted;
++ __u32 chunkValid;
++
++ if (!obj)
++ return;
+
+- if(!obj)
++ if (obj->beingCreated)
+ return;
+
+ dev = obj->myDev;
+
+- if(yaffs_SkipVerification(dev))
++ if (yaffs_SkipVerification(dev))
+ return;
+
+ /* Check sane object header chunk */
+@@ -755,50 +758,54 @@ static void yaffs_VerifyObject(yaffs_Obj
+ chunkMin = dev->internalStartBlock * dev->nChunksPerBlock;
+ chunkMax = (dev->internalEndBlock+1) * dev->nChunksPerBlock - 1;
+
+- chunkIdOk = (obj->chunkId >= chunkMin && obj->chunkId <= chunkMax);
+- chunkIsLive = chunkIdOk &&
++ chunkInRange = (((unsigned)(obj->hdrChunk)) >= chunkMin && ((unsigned)(obj->hdrChunk)) <= chunkMax);
++ chunkIdOk = chunkInRange || obj->hdrChunk == 0;
++ chunkValid = chunkInRange &&
+ yaffs_CheckChunkBit(dev,
+- obj->chunkId / dev->nChunksPerBlock,
+- obj->chunkId % dev->nChunksPerBlock);
+- if(!obj->fake &&
+- (!chunkIdOk || !chunkIsLive)) {
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d has chunkId %d %s %s"TENDSTR),
+- obj->objectId,obj->chunkId,
+- chunkIdOk ? "" : ",out of range",
+- chunkIsLive || !chunkIdOk ? "" : ",marked as deleted"));
++ obj->hdrChunk / dev->nChunksPerBlock,
++ obj->hdrChunk % dev->nChunksPerBlock);
++ chunkShouldNotBeDeleted = chunkInRange && !chunkValid;
++
++ if (!obj->fake &&
++ (!chunkIdOk || chunkShouldNotBeDeleted)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d has chunkId %d %s %s"TENDSTR),
++ obj->objectId, obj->hdrChunk,
++ chunkIdOk ? "" : ",out of range",
++ chunkShouldNotBeDeleted ? ",marked as deleted" : ""));
+ }
+
+- if(chunkIdOk && chunkIsLive &&!yaffs_SkipNANDVerification(dev)) {
++ if (chunkValid && !yaffs_SkipNANDVerification(dev)) {
+ yaffs_ExtendedTags tags;
+ yaffs_ObjectHeader *oh;
+- __u8 *buffer = yaffs_GetTempBuffer(dev,__LINE__);
++ __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
+
+ oh = (yaffs_ObjectHeader *)buffer;
+
+- yaffs_ReadChunkWithTagsFromNAND(dev, obj->chunkId,buffer, &tags);
++ yaffs_ReadChunkWithTagsFromNAND(dev, obj->hdrChunk, buffer,
++ &tags);
+
+- yaffs_VerifyObjectHeader(obj,oh,&tags,1);
++ yaffs_VerifyObjectHeader(obj, oh, &tags, 1);
+
+- yaffs_ReleaseTempBuffer(dev,buffer,__LINE__);
++ yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+ }
+
+ /* Verify it has a parent */
+- if(obj && !obj->fake &&
+- (!obj->parent || obj->parent->myDev != dev)){
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR),
+- obj->objectId,obj->parent));
++ if (obj && !obj->fake &&
++ (!obj->parent || obj->parent->myDev != dev)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR),
++ obj->objectId, obj->parent));
+ }
+
+ /* Verify parent is a directory */
+- if(obj->parent && obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY){
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR),
+- obj->objectId,obj->parent->variantType));
++ if (obj->parent && obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR),
++ obj->objectId, obj->parent->variantType));
+ }
+
+- switch(obj->variantType){
++ switch (obj->variantType) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ yaffs_VerifyFile(obj);
+ break;
+@@ -818,33 +825,30 @@ static void yaffs_VerifyObject(yaffs_Obj
+ default:
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d has illegaltype %d"TENDSTR),
+- obj->objectId,obj->variantType));
++ obj->objectId, obj->variantType));
+ break;
+ }
+-
+-
+ }
+
+ static void yaffs_VerifyObjects(yaffs_Device *dev)
+ {
+ yaffs_Object *obj;
+ int i;
+- struct list_head *lh;
++ struct ylist_head *lh;
+
+- if(yaffs_SkipVerification(dev))
++ if (yaffs_SkipVerification(dev))
+ return;
+
+ /* Iterate through the objects in each hash entry */
+
+- for(i = 0; i < YAFFS_NOBJECT_BUCKETS; i++){
+- list_for_each(lh, &dev->objectBucket[i].list) {
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ ylist_for_each(lh, &dev->objectBucket[i].list) {
+ if (lh) {
+- obj = list_entry(lh, yaffs_Object, hashLink);
++ obj = ylist_entry(lh, yaffs_Object, hashLink);
+ yaffs_VerifyObject(obj);
+ }
+ }
+- }
+-
++ }
+ }
+
+
+@@ -855,19 +859,20 @@ static void yaffs_VerifyObjects(yaffs_De
+ static Y_INLINE int yaffs_HashFunction(int n)
+ {
+ n = abs(n);
+- return (n % YAFFS_NOBJECT_BUCKETS);
++ return n % YAFFS_NOBJECT_BUCKETS;
+ }
+
+ /*
+- * Access functions to useful fake objects
++ * Access functions to useful fake objects.
++ * Note that root might have a presence in NAND if permissions are set.
+ */
+
+-yaffs_Object *yaffs_Root(yaffs_Device * dev)
++yaffs_Object *yaffs_Root(yaffs_Device *dev)
+ {
+ return dev->rootDir;
+ }
+
+-yaffs_Object *yaffs_LostNFound(yaffs_Device * dev)
++yaffs_Object *yaffs_LostNFound(yaffs_Device *dev)
+ {
+ return dev->lostNFoundDir;
+ }
+@@ -877,7 +882,7 @@ yaffs_Object *yaffs_LostNFound(yaffs_Dev
+ * Erased NAND checking functions
+ */
+
+-int yaffs_CheckFF(__u8 * buffer, int nBytes)
++int yaffs_CheckFF(__u8 *buffer, int nBytes)
+ {
+ /* Horrible, slow implementation */
+ while (nBytes--) {
+@@ -889,9 +894,8 @@ int yaffs_CheckFF(__u8 * buffer, int nBy
+ }
+
+ static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND)
++ int chunkInNAND)
+ {
+-
+ int retval = YAFFS_OK;
+ __u8 *data = yaffs_GetTempBuffer(dev, __LINE__);
+ yaffs_ExtendedTags tags;
+@@ -899,10 +903,9 @@ static int yaffs_CheckChunkErased(struct
+
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, chunkInNAND, data, &tags);
+
+- if(tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR)
++ if (tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR)
+ retval = YAFFS_FAIL;
+
+-
+ if (!yaffs_CheckFF(data, dev->nDataBytesPerChunk) || tags.chunkUsed) {
+ T(YAFFS_TRACE_NANDACCESS,
+ (TSTR("Chunk %d not erased" TENDSTR), chunkInNAND));
+@@ -915,11 +918,10 @@ static int yaffs_CheckChunkErased(struct
+
+ }
+
+-
+ static int yaffs_WriteNewChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
+- const __u8 * data,
+- yaffs_ExtendedTags * tags,
+- int useReserve)
++ const __u8 *data,
++ yaffs_ExtendedTags *tags,
++ int useReserve)
+ {
+ int attempts = 0;
+ int writeOk = 0;
+@@ -972,7 +974,7 @@ static int yaffs_WriteNewChunkWithTagsTo
+ erasedOk = yaffs_CheckChunkErased(dev, chunk);
+ if (erasedOk != YAFFS_OK) {
+ T(YAFFS_TRACE_ERROR,
+- (TSTR ("**>> yaffs chunk %d was not erased"
++ (TSTR("**>> yaffs chunk %d was not erased"
+ TENDSTR), chunk));
+
+ /* try another chunk */
+@@ -992,7 +994,11 @@ static int yaffs_WriteNewChunkWithTagsTo
+ /* Copy the data into the robustification buffer */
+ yaffs_HandleWriteChunkOk(dev, chunk, data, tags);
+
+- } while (writeOk != YAFFS_OK && attempts < yaffs_wr_attempts);
++ } while (writeOk != YAFFS_OK &&
++ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
++
++ if (!writeOk)
++ chunk = -1;
+
+ if (attempts > 1) {
+ T(YAFFS_TRACE_ERROR,
+@@ -1009,13 +1015,35 @@ static int yaffs_WriteNewChunkWithTagsTo
+ * Block retiring for handling a broken block.
+ */
+
+-static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND)
++static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND)
+ {
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
+
+ yaffs_InvalidateCheckpoint(dev);
+
+- yaffs_MarkBlockBad(dev, blockInNAND);
++ if (yaffs_MarkBlockBad(dev, blockInNAND) != YAFFS_OK) {
++ if (yaffs_EraseBlockInNAND(dev, blockInNAND) != YAFFS_OK) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR(
++ "yaffs: Failed to mark bad and erase block %d"
++ TENDSTR), blockInNAND));
++ } else {
++ yaffs_ExtendedTags tags;
++ int chunkId = blockInNAND * dev->nChunksPerBlock;
++
++ __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
++
++ memset(buffer, 0xff, dev->nDataBytesPerChunk);
++ yaffs_InitialiseTags(&tags);
++ tags.sequenceNumber = YAFFS_SEQUENCE_BAD_BLOCK;
++ if (dev->writeChunkWithTagsToNAND(dev, chunkId -
++ dev->chunkOffset, buffer, &tags) != YAFFS_OK)
++ T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Failed to "
++ TCONT("write bad block marker to block %d")
++ TENDSTR), blockInNAND));
++
++ yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
++ }
++ }
+
+ bi->blockState = YAFFS_BLOCK_STATE_DEAD;
+ bi->gcPrioritise = 0;
+@@ -1029,49 +1057,45 @@ static void yaffs_RetireBlock(yaffs_Devi
+ *
+ */
+
+-static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags * tags)
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *tags)
+ {
+ }
+
+-static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+- const yaffs_ExtendedTags * tags)
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++ const yaffs_ExtendedTags *tags)
+ {
+ }
+
+ void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi)
+ {
+- if(!bi->gcPrioritise){
++ if (!bi->gcPrioritise) {
+ bi->gcPrioritise = 1;
+ dev->hasPendingPrioritisedGCs = 1;
+- bi->chunkErrorStrikes ++;
++ bi->chunkErrorStrikes++;
+
+- if(bi->chunkErrorStrikes > 3){
++ if (bi->chunkErrorStrikes > 3) {
+ bi->needsRetiring = 1; /* Too many strikes, so retire this */
+ T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR)));
+
+ }
+-
+ }
+ }
+
+-static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk)
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND,
++ int erasedOk)
+ {
+-
+ int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
+
+- yaffs_HandleChunkError(dev,bi);
++ yaffs_HandleChunkError(dev, bi);
+
+-
+- if(erasedOk ) {
++ if (erasedOk) {
+ /* Was an actual write failure, so mark the block for retirement */
+ bi->needsRetiring = 1;
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("**>> Block %d needs retiring" TENDSTR), blockInNAND));
+-
+-
+ }
+
+ /* Delete the chunk */
+@@ -1081,12 +1105,12 @@ static void yaffs_HandleWriteChunkError(
+
+ /*---------------- Name handling functions ------------*/
+
+-static __u16 yaffs_CalcNameSum(const YCHAR * name)
++static __u16 yaffs_CalcNameSum(const YCHAR *name)
+ {
+ __u16 sum = 0;
+ __u16 i = 1;
+
+- YUCHAR *bname = (YUCHAR *) name;
++ const YUCHAR *bname = (const YUCHAR *) name;
+ if (bname) {
+ while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH/2))) {
+
+@@ -1102,14 +1126,14 @@ static __u16 yaffs_CalcNameSum(const YCH
+ return sum;
+ }
+
+-static void yaffs_SetObjectName(yaffs_Object * obj, const YCHAR * name)
++static void yaffs_SetObjectName(yaffs_Object *obj, const YCHAR *name)
+ {
+ #ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+- if (name && yaffs_strlen(name) <= YAFFS_SHORT_NAME_LENGTH) {
++ memset(obj->shortName, 0, sizeof(YCHAR) * (YAFFS_SHORT_NAME_LENGTH+1));
++ if (name && yaffs_strlen(name) <= YAFFS_SHORT_NAME_LENGTH)
+ yaffs_strcpy(obj->shortName, name);
+- } else {
++ else
+ obj->shortName[0] = _Y('\0');
+- }
+ #endif
+ obj->sum = yaffs_CalcNameSum(name);
+ }
+@@ -1126,7 +1150,7 @@ static void yaffs_SetObjectName(yaffs_Ob
+ * Don't use this function directly
+ */
+
+-static int yaffs_CreateTnodes(yaffs_Device * dev, int nTnodes)
++static int yaffs_CreateTnodes(yaffs_Device *dev, int nTnodes)
+ {
+ int i;
+ int tnodeSize;
+@@ -1143,6 +1167,9 @@ static int yaffs_CreateTnodes(yaffs_Devi
+ * Must be a multiple of 32-bits */
+ tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
++ if (tnodeSize < sizeof(yaffs_Tnode))
++ tnodeSize = sizeof(yaffs_Tnode);
++
+ /* make these things */
+
+ newTnodes = YMALLOC(nTnodes * tnodeSize);
+@@ -1150,7 +1177,7 @@ static int yaffs_CreateTnodes(yaffs_Devi
+
+ if (!newTnodes) {
+ T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs: Could not allocate Tnodes" TENDSTR)));
++ (TSTR("yaffs: Could not allocate Tnodes" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+@@ -1170,7 +1197,7 @@ static int yaffs_CreateTnodes(yaffs_Devi
+ dev->freeTnodes = newTnodes;
+ #else
+ /* New hookup for wide tnodes */
+- for(i = 0; i < nTnodes -1; i++) {
++ for (i = 0; i < nTnodes - 1; i++) {
+ curr = (yaffs_Tnode *) &mem[i * tnodeSize];
+ next = (yaffs_Tnode *) &mem[(i+1) * tnodeSize];
+ curr->internal[0] = next;
+@@ -1197,7 +1224,6 @@ static int yaffs_CreateTnodes(yaffs_Devi
+ (TSTR
+ ("yaffs: Could not add tnodes to management list" TENDSTR)));
+ return YAFFS_FAIL;
+-
+ } else {
+ tnl->tnodes = newTnodes;
+ tnl->next = dev->allocatedTnodeList;
+@@ -1211,14 +1237,13 @@ static int yaffs_CreateTnodes(yaffs_Devi
+
+ /* GetTnode gets us a clean tnode. Tries to allocate more if we run out */
+
+-static yaffs_Tnode *yaffs_GetTnodeRaw(yaffs_Device * dev)
++static yaffs_Tnode *yaffs_GetTnodeRaw(yaffs_Device *dev)
+ {
+ yaffs_Tnode *tn = NULL;
+
+ /* If there are none left make more */
+- if (!dev->freeTnodes) {
++ if (!dev->freeTnodes)
+ yaffs_CreateTnodes(dev, YAFFS_ALLOCATION_NTNODES);
+- }
+
+ if (dev->freeTnodes) {
+ tn = dev->freeTnodes;
+@@ -1233,21 +1258,27 @@ static yaffs_Tnode *yaffs_GetTnodeRaw(ya
+ dev->nFreeTnodes--;
+ }
+
++ dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
++
+ return tn;
+ }
+
+-static yaffs_Tnode *yaffs_GetTnode(yaffs_Device * dev)
++static yaffs_Tnode *yaffs_GetTnode(yaffs_Device *dev)
+ {
+ yaffs_Tnode *tn = yaffs_GetTnodeRaw(dev);
++ int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
+- if(tn)
+- memset(tn, 0, (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
++ if (tnodeSize < sizeof(yaffs_Tnode))
++ tnodeSize = sizeof(yaffs_Tnode);
++
++ if (tn)
++ memset(tn, 0, tnodeSize);
+
+ return tn;
+ }
+
+ /* FreeTnode frees up a tnode and puts it back on the free list */
+-static void yaffs_FreeTnode(yaffs_Device * dev, yaffs_Tnode * tn)
++static void yaffs_FreeTnode(yaffs_Device *dev, yaffs_Tnode *tn)
+ {
+ if (tn) {
+ #ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+@@ -1262,9 +1293,10 @@ static void yaffs_FreeTnode(yaffs_Device
+ dev->freeTnodes = tn;
+ dev->nFreeTnodes++;
+ }
++ dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
+ }
+
+-static void yaffs_DeinitialiseTnodes(yaffs_Device * dev)
++static void yaffs_DeinitialiseTnodes(yaffs_Device *dev)
+ {
+ /* Free the list of allocated tnodes */
+ yaffs_TnodeList *tmp;
+@@ -1282,71 +1314,72 @@ static void yaffs_DeinitialiseTnodes(yaf
+ dev->nFreeTnodes = 0;
+ }
+
+-static void yaffs_InitialiseTnodes(yaffs_Device * dev)
++static void yaffs_InitialiseTnodes(yaffs_Device *dev)
+ {
+ dev->allocatedTnodeList = NULL;
+ dev->freeTnodes = NULL;
+ dev->nFreeTnodes = 0;
+ dev->nTnodesCreated = 0;
+-
+ }
+
+
+-void yaffs_PutLevel0Tnode(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos, unsigned val)
++void yaffs_PutLevel0Tnode(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos,
++ unsigned val)
+ {
+- __u32 *map = (__u32 *)tn;
+- __u32 bitInMap;
+- __u32 bitInWord;
+- __u32 wordInMap;
+- __u32 mask;
++ __u32 *map = (__u32 *)tn;
++ __u32 bitInMap;
++ __u32 bitInWord;
++ __u32 wordInMap;
++ __u32 mask;
+
+- pos &= YAFFS_TNODES_LEVEL0_MASK;
+- val >>= dev->chunkGroupBits;
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
++ val >>= dev->chunkGroupBits;
+
+- bitInMap = pos * dev->tnodeWidth;
+- wordInMap = bitInMap /32;
+- bitInWord = bitInMap & (32 -1);
++ bitInMap = pos * dev->tnodeWidth;
++ wordInMap = bitInMap / 32;
++ bitInWord = bitInMap & (32 - 1);
+
+- mask = dev->tnodeMask << bitInWord;
++ mask = dev->tnodeMask << bitInWord;
+
+- map[wordInMap] &= ~mask;
+- map[wordInMap] |= (mask & (val << bitInWord));
++ map[wordInMap] &= ~mask;
++ map[wordInMap] |= (mask & (val << bitInWord));
+
+- if(dev->tnodeWidth > (32-bitInWord)) {
+- bitInWord = (32 - bitInWord);
+- wordInMap++;;
+- mask = dev->tnodeMask >> (/*dev->tnodeWidth -*/ bitInWord);
+- map[wordInMap] &= ~mask;
+- map[wordInMap] |= (mask & (val >> bitInWord));
+- }
++ if (dev->tnodeWidth > (32 - bitInWord)) {
++ bitInWord = (32 - bitInWord);
++ wordInMap++;
++ mask = dev->tnodeMask >> (/*dev->tnodeWidth -*/ bitInWord);
++ map[wordInMap] &= ~mask;
++ map[wordInMap] |= (mask & (val >> bitInWord));
++ }
+ }
+
+-static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos)
++static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn,
++ unsigned pos)
+ {
+- __u32 *map = (__u32 *)tn;
+- __u32 bitInMap;
+- __u32 bitInWord;
+- __u32 wordInMap;
+- __u32 val;
++ __u32 *map = (__u32 *)tn;
++ __u32 bitInMap;
++ __u32 bitInWord;
++ __u32 wordInMap;
++ __u32 val;
+
+- pos &= YAFFS_TNODES_LEVEL0_MASK;
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+- bitInMap = pos * dev->tnodeWidth;
+- wordInMap = bitInMap /32;
+- bitInWord = bitInMap & (32 -1);
++ bitInMap = pos * dev->tnodeWidth;
++ wordInMap = bitInMap / 32;
++ bitInWord = bitInMap & (32 - 1);
+
+- val = map[wordInMap] >> bitInWord;
++ val = map[wordInMap] >> bitInWord;
+
+- if(dev->tnodeWidth > (32-bitInWord)) {
+- bitInWord = (32 - bitInWord);
+- wordInMap++;;
+- val |= (map[wordInMap] << bitInWord);
+- }
++ if (dev->tnodeWidth > (32 - bitInWord)) {
++ bitInWord = (32 - bitInWord);
++ wordInMap++;
++ val |= (map[wordInMap] << bitInWord);
++ }
+
+- val &= dev->tnodeMask;
+- val <<= dev->chunkGroupBits;
++ val &= dev->tnodeMask;
++ val <<= dev->chunkGroupBits;
+
+- return val;
++ return val;
+ }
+
+ /* ------------------- End of individual tnode manipulation -----------------*/
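For readers of the hunk above: yaffs_PutLevel0Tnode and yaffs_GetChunkGroupBase pack one tnodeWidth-bit entry per level-0 slot into a flat array of 32-bit words, spilling into the next word when an entry straddles a word boundary. A minimal standalone sketch of that word/bit arithmetic follows; the 18-bit width, the helper names and the demo values are assumptions for illustration, not part of the patch.

	#include <stdio.h>
	#include <stdint.h>

	/* Assumed entry width; the real value is dev->tnodeWidth. */
	#define TNODE_WIDTH 18u
	#define TNODE_MASK  ((1u << TNODE_WIDTH) - 1u)

	/* Store one TNODE_WIDTH-bit entry at index pos, mirroring yaffs_PutLevel0Tnode. */
	static void put_entry(uint32_t *map, unsigned pos, uint32_t val)
	{
		unsigned bit_in_map = pos * TNODE_WIDTH;
		unsigned word = bit_in_map / 32;
		unsigned bit = bit_in_map & 31;
		uint32_t mask = TNODE_MASK << bit;

		map[word] = (map[word] & ~mask) | (mask & (val << bit));

		if (TNODE_WIDTH > 32 - bit) {          /* entry straddles a word boundary */
			unsigned spill = 32 - bit;
			mask = TNODE_MASK >> spill;
			map[word + 1] = (map[word + 1] & ~mask) | (mask & (val >> spill));
		}
	}

	/* Read an entry back, mirroring yaffs_GetChunkGroupBase (chunkGroupBits omitted). */
	static uint32_t get_entry(const uint32_t *map, unsigned pos)
	{
		unsigned bit_in_map = pos * TNODE_WIDTH;
		unsigned word = bit_in_map / 32;
		unsigned bit = bit_in_map & 31;
		uint32_t val = map[word] >> bit;

		if (TNODE_WIDTH > 32 - bit)
			val |= map[word + 1] << (32 - bit);

		return val & TNODE_MASK;
	}

	int main(void)
	{
		uint32_t map[8] = { 0 };

		put_entry(map, 3, 0x2A5A5u);
		printf("entry 3 = 0x%x\n", (unsigned)get_entry(map, 3));  /* prints 0x2a5a5 */
		return 0;
	}

With a width that does not divide 32, every other entry crosses a word boundary, which is why both routines in the patch carry the second, spill-handling branch.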
+@@ -1357,24 +1390,21 @@ static __u32 yaffs_GetChunkGroupBase(yaf
+ */
+
+ /* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+-static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device * dev,
+- yaffs_FileStructure * fStruct,
+- __u32 chunkId)
++static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev,
++ yaffs_FileStructure *fStruct,
++ __u32 chunkId)
+ {
+-
+ yaffs_Tnode *tn = fStruct->top;
+ __u32 i;
+ int requiredTallness;
+ int level = fStruct->topLevel;
+
+ /* Check sane level and chunk Id */
+- if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL) {
++ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+- }
+
+- if (chunkId > YAFFS_MAX_CHUNK_ID) {
++ if (chunkId > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+- }
+
+ /* First check we're tall enough (ie enough topLevel) */
+
+@@ -1385,22 +1415,17 @@ static yaffs_Tnode *yaffs_FindLevel0Tnod
+ requiredTallness++;
+ }
+
+- if (requiredTallness > fStruct->topLevel) {
+- /* Not tall enough, so we can't find it, return NULL. */
+- return NULL;
+- }
++ if (requiredTallness > fStruct->topLevel)
++ return NULL; /* Not tall enough, so we can't find it */
+
+ /* Traverse down to level 0 */
+ while (level > 0 && tn) {
+- tn = tn->
+- internal[(chunkId >>
+- ( YAFFS_TNODES_LEVEL0_BITS +
+- (level - 1) *
+- YAFFS_TNODES_INTERNAL_BITS)
+- ) &
+- YAFFS_TNODES_INTERNAL_MASK];
++ tn = tn->internal[(chunkId >>
++ (YAFFS_TNODES_LEVEL0_BITS +
++ (level - 1) *
++ YAFFS_TNODES_INTERNAL_BITS)) &
++ YAFFS_TNODES_INTERNAL_MASK];
+ level--;
+-
+ }
+
+ return tn;
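As a side note on the traversal just reflowed above: each internal level consumes YAFFS_TNODES_INTERNAL_BITS of the chunk index, and the low YAFFS_TNODES_LEVEL0_BITS select the level-0 slot. A small sketch of that index arithmetic, with bit counts assumed to be the usual yaffs defaults rather than read from this patch:

	#include <stdio.h>

	/* Assumed to match the common yaffs configuration. */
	#define TNODES_LEVEL0_BITS    4                                 /* 16 level-0 slots */
	#define TNODES_INTERNAL_BITS  3                                 /* 8 internal slots */
	#define TNODES_INTERNAL_MASK  ((1 << TNODES_INTERNAL_BITS) - 1)
	#define TNODES_LEVEL0_MASK    ((1 << TNODES_LEVEL0_BITS) - 1)

	int main(void)
	{
		unsigned chunk_id = 0x1234;  /* demo chunk index within a file */
		int level;

		/* Same decomposition as the "while (level > 0 && tn)" descent above,
		 * pretending the tree is four levels tall. */
		for (level = 4; level > 0; level--) {
			unsigned idx = (chunk_id >>
				(TNODES_LEVEL0_BITS +
				 (level - 1) * TNODES_INTERNAL_BITS)) &
				TNODES_INTERNAL_MASK;
			printf("level %d -> internal[%u]\n", level, idx);
		}
		printf("level 0 -> slot %u\n", chunk_id & TNODES_LEVEL0_MASK);
		return 0;
	}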
+@@ -1417,12 +1442,11 @@ static yaffs_Tnode *yaffs_FindLevel0Tnod
+ * be plugged into the ttree.
+ */
+
+-static yaffs_Tnode *yaffs_AddOrFindLevel0Tnode(yaffs_Device * dev,
+- yaffs_FileStructure * fStruct,
+- __u32 chunkId,
+- yaffs_Tnode *passedTn)
++static yaffs_Tnode *yaffs_AddOrFindLevel0Tnode(yaffs_Device *dev,
++ yaffs_FileStructure *fStruct,
++ __u32 chunkId,
++ yaffs_Tnode *passedTn)
+ {
+-
+ int requiredTallness;
+ int i;
+ int l;
+@@ -1432,13 +1456,11 @@ static yaffs_Tnode *yaffs_AddOrFindLevel
+
+
+ /* Check sane level and page Id */
+- if (fStruct->topLevel < 0 || fStruct->topLevel > YAFFS_TNODES_MAX_LEVEL) {
++ if (fStruct->topLevel < 0 || fStruct->topLevel > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+- }
+
+- if (chunkId > YAFFS_MAX_CHUNK_ID) {
++ if (chunkId > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+- }
+
+ /* First check we're tall enough (ie enough topLevel) */
+
+@@ -1451,7 +1473,7 @@ static yaffs_Tnode *yaffs_AddOrFindLevel
+
+
+ if (requiredTallness > fStruct->topLevel) {
+- /* Not tall enough,gotta make the tree taller */
++ /* Not tall enough, gotta make the tree taller */
+ for (i = fStruct->topLevel; i < requiredTallness; i++) {
+
+ tn = yaffs_GetTnode(dev);
+@@ -1473,27 +1495,27 @@ static yaffs_Tnode *yaffs_AddOrFindLevel
+ l = fStruct->topLevel;
+ tn = fStruct->top;
+
+- if(l > 0) {
++ if (l > 0) {
+ while (l > 0 && tn) {
+ x = (chunkId >>
+- ( YAFFS_TNODES_LEVEL0_BITS +
++ (YAFFS_TNODES_LEVEL0_BITS +
+ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK;
+
+
+- if((l>1) && !tn->internal[x]){
++ if ((l > 1) && !tn->internal[x]) {
+ /* Add missing non-level-zero tnode */
+ tn->internal[x] = yaffs_GetTnode(dev);
+
+- } else if(l == 1) {
++ } else if (l == 1) {
+ /* Looking from level 1 at level 0 */
+- if (passedTn) {
++ if (passedTn) {
+ /* If we already have one, then release it.*/
+- if(tn->internal[x])
+- yaffs_FreeTnode(dev,tn->internal[x]);
++ if (tn->internal[x])
++ yaffs_FreeTnode(dev, tn->internal[x]);
+ tn->internal[x] = passedTn;
+
+- } else if(!tn->internal[x]) {
++ } else if (!tn->internal[x]) {
+ /* Don't have one, none passed in */
+ tn->internal[x] = yaffs_GetTnode(dev);
+ }
+@@ -1504,31 +1526,29 @@ static yaffs_Tnode *yaffs_AddOrFindLevel
+ }
+ } else {
+ /* top is level 0 */
+- if(passedTn) {
+- memcpy(tn,passedTn,(dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
+- yaffs_FreeTnode(dev,passedTn);
++ if (passedTn) {
++ memcpy(tn, passedTn, (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
++ yaffs_FreeTnode(dev, passedTn);
+ }
+ }
+
+ return tn;
+ }
+
+-static int yaffs_FindChunkInGroup(yaffs_Device * dev, int theChunk,
+- yaffs_ExtendedTags * tags, int objectId,
+- int chunkInInode)
++static int yaffs_FindChunkInGroup(yaffs_Device *dev, int theChunk,
++ yaffs_ExtendedTags *tags, int objectId,
++ int chunkInInode)
+ {
+ int j;
+
+ for (j = 0; theChunk && j < dev->chunkGroupSize; j++) {
+- if (yaffs_CheckChunkBit
+- (dev, theChunk / dev->nChunksPerBlock,
+- theChunk % dev->nChunksPerBlock)) {
++ if (yaffs_CheckChunkBit(dev, theChunk / dev->nChunksPerBlock,
++ theChunk % dev->nChunksPerBlock)) {
+ yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL,
+ tags);
+ if (yaffs_TagsMatch(tags, objectId, chunkInInode)) {
+ /* found it; */
+ return theChunk;
+-
+ }
+ }
+ theChunk++;
+@@ -1543,7 +1563,7 @@ static int yaffs_FindChunkInGroup(yaffs_
+ * Returns 0 if it stopped early due to hitting the limit and the delete is incomplete.
+ */
+
+-static int yaffs_DeleteWorker(yaffs_Object * in, yaffs_Tnode * tn, __u32 level,
++static int yaffs_DeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, __u32 level,
+ int chunkOffset, int *limit)
+ {
+ int i;
+@@ -1557,7 +1577,6 @@ static int yaffs_DeleteWorker(yaffs_Obje
+
+ if (tn) {
+ if (level > 0) {
+-
+ for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
+ i--) {
+ if (tn->internal[i]) {
+@@ -1565,17 +1584,17 @@ static int yaffs_DeleteWorker(yaffs_Obje
+ allDone = 0;
+ } else {
+ allDone =
+- yaffs_DeleteWorker(in,
+- tn->
+- internal
+- [i],
+- level -
+- 1,
+- (chunkOffset
++ yaffs_DeleteWorker(in,
++ tn->
++ internal
++ [i],
++ level -
++ 1,
++ (chunkOffset
+ <<
+ YAFFS_TNODES_INTERNAL_BITS)
+- + i,
+- limit);
++ + i,
++ limit);
+ }
+ if (allDone) {
+ yaffs_FreeTnode(dev,
+@@ -1584,27 +1603,25 @@ static int yaffs_DeleteWorker(yaffs_Obje
+ tn->internal[i] = NULL;
+ }
+ }
+-
+ }
+ return (allDone) ? 1 : 0;
+ } else if (level == 0) {
+ int hitLimit = 0;
+
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0 && !hitLimit;
+- i--) {
+- theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
++ i--) {
++ theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+ if (theChunk) {
+
+- chunkInInode =
+- (chunkOffset <<
+- YAFFS_TNODES_LEVEL0_BITS) + i;
++ chunkInInode = (chunkOffset <<
++ YAFFS_TNODES_LEVEL0_BITS) + i;
+
+ foundChunk =
+- yaffs_FindChunkInGroup(dev,
+- theChunk,
+- &tags,
+- in->objectId,
+- chunkInInode);
++ yaffs_FindChunkInGroup(dev,
++ theChunk,
++ &tags,
++ in->objectId,
++ chunkInInode);
+
+ if (foundChunk > 0) {
+ yaffs_DeleteChunk(dev,
+@@ -1613,14 +1630,13 @@ static int yaffs_DeleteWorker(yaffs_Obje
+ in->nDataChunks--;
+ if (limit) {
+ *limit = *limit - 1;
+- if (*limit <= 0) {
++ if (*limit <= 0)
+ hitLimit = 1;
+- }
+ }
+
+ }
+
+- yaffs_PutLevel0Tnode(dev,tn,i,0);
++ yaffs_PutLevel0Tnode(dev, tn, i, 0);
+ }
+
+ }
+@@ -1634,9 +1650,8 @@ static int yaffs_DeleteWorker(yaffs_Obje
+
+ }
+
+-static void yaffs_SoftDeleteChunk(yaffs_Device * dev, int chunk)
++static void yaffs_SoftDeleteChunk(yaffs_Device *dev, int chunk)
+ {
+-
+ yaffs_BlockInfo *theBlock;
+
+ T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk));
+@@ -1654,7 +1669,7 @@ static void yaffs_SoftDeleteChunk(yaffs_
+ * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
+ */
+
+-static int yaffs_SoftDeleteWorker(yaffs_Object * in, yaffs_Tnode * tn,
++static int yaffs_SoftDeleteWorker(yaffs_Object *in, yaffs_Tnode *tn,
+ __u32 level, int chunkOffset)
+ {
+ int i;
+@@ -1691,14 +1706,14 @@ static int yaffs_SoftDeleteWorker(yaffs_
+ } else if (level == 0) {
+
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+- theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
++ theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+ if (theChunk) {
+ /* Note this does not find the real chunk, only the chunk group.
+ * We make an assumption that a chunk group is not larger than
+ * a block.
+ */
+ yaffs_SoftDeleteChunk(dev, theChunk);
+- yaffs_PutLevel0Tnode(dev,tn,i,0);
++ yaffs_PutLevel0Tnode(dev, tn, i, 0);
+ }
+
+ }
+@@ -1712,7 +1727,7 @@ static int yaffs_SoftDeleteWorker(yaffs_
+
+ }
+
+-static void yaffs_SoftDeleteFile(yaffs_Object * obj)
++static void yaffs_SoftDeleteFile(yaffs_Object *obj)
+ {
+ if (obj->deleted &&
+ obj->variantType == YAFFS_OBJECT_TYPE_FILE && !obj->softDeleted) {
+@@ -1746,8 +1761,8 @@ static void yaffs_SoftDeleteFile(yaffs_O
+ * by a special case.
+ */
+
+-static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device * dev, yaffs_Tnode * tn,
+- __u32 level, int del0)
++static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device *dev, yaffs_Tnode *tn,
++ __u32 level, int del0)
+ {
+ int i;
+ int hasData;
+@@ -1763,9 +1778,8 @@ static yaffs_Tnode *yaffs_PruneWorker(ya
+ (i == 0) ? del0 : 1);
+ }
+
+- if (tn->internal[i]) {
++ if (tn->internal[i])
+ hasData++;
+- }
+ }
+
+ if (hasData == 0 && del0) {
+@@ -1781,8 +1795,8 @@ static yaffs_Tnode *yaffs_PruneWorker(ya
+
+ }
+
+-static int yaffs_PruneFileStructure(yaffs_Device * dev,
+- yaffs_FileStructure * fStruct)
++static int yaffs_PruneFileStructure(yaffs_Device *dev,
++ yaffs_FileStructure *fStruct)
+ {
+ int i;
+ int hasData;
+@@ -1805,9 +1819,8 @@ static int yaffs_PruneFileStructure(yaff
+
+ hasData = 0;
+ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+- if (tn->internal[i]) {
++ if (tn->internal[i])
+ hasData++;
+- }
+ }
+
+ if (!hasData) {
+@@ -1828,7 +1841,7 @@ static int yaffs_PruneFileStructure(yaff
+ /* yaffs_CreateFreeObjects creates a bunch more objects and
+ * adds them to the object free list.
+ */
+-static int yaffs_CreateFreeObjects(yaffs_Device * dev, int nObjects)
++static int yaffs_CreateFreeObjects(yaffs_Device *dev, int nObjects)
+ {
+ int i;
+ yaffs_Object *newObjects;
+@@ -1842,9 +1855,9 @@ static int yaffs_CreateFreeObjects(yaffs
+ list = YMALLOC(sizeof(yaffs_ObjectList));
+
+ if (!newObjects || !list) {
+- if(newObjects)
++ if (newObjects)
+ YFREE(newObjects);
+- if(list)
++ if (list)
+ YFREE(list);
+ T(YAFFS_TRACE_ALLOCATE,
+ (TSTR("yaffs: Could not allocate more objects" TENDSTR)));
+@@ -1854,7 +1867,7 @@ static int yaffs_CreateFreeObjects(yaffs
+ /* Hook them into the free list */
+ for (i = 0; i < nObjects - 1; i++) {
+ newObjects[i].siblings.next =
+- (struct list_head *)(&newObjects[i + 1]);
++ (struct ylist_head *)(&newObjects[i + 1]);
+ }
+
+ newObjects[nObjects - 1].siblings.next = (void *)dev->freeObjects;
+@@ -1873,85 +1886,109 @@ static int yaffs_CreateFreeObjects(yaffs
+
+
+ /* AllocateEmptyObject gets us a clean Object. Tries to allocate more if we run out */
+-static yaffs_Object *yaffs_AllocateEmptyObject(yaffs_Device * dev)
++static yaffs_Object *yaffs_AllocateEmptyObject(yaffs_Device *dev)
+ {
+ yaffs_Object *tn = NULL;
+
++#ifdef VALGRIND_TEST
++ tn = YMALLOC(sizeof(yaffs_Object));
++#else
+ /* If there are none left make more */
+- if (!dev->freeObjects) {
++ if (!dev->freeObjects)
+ yaffs_CreateFreeObjects(dev, YAFFS_ALLOCATION_NOBJECTS);
+- }
+
+ if (dev->freeObjects) {
+ tn = dev->freeObjects;
+ dev->freeObjects =
+- (yaffs_Object *) (dev->freeObjects->siblings.next);
++ (yaffs_Object *) (dev->freeObjects->siblings.next);
+ dev->nFreeObjects--;
+-
++ }
++#endif
++ if (tn) {
+ /* Now sweeten it up... */
+
+ memset(tn, 0, sizeof(yaffs_Object));
++ tn->beingCreated = 1;
++
+ tn->myDev = dev;
+- tn->chunkId = -1;
++ tn->hdrChunk = 0;
+ tn->variantType = YAFFS_OBJECT_TYPE_UNKNOWN;
+- INIT_LIST_HEAD(&(tn->hardLinks));
+- INIT_LIST_HEAD(&(tn->hashLink));
+- INIT_LIST_HEAD(&tn->siblings);
++ YINIT_LIST_HEAD(&(tn->hardLinks));
++ YINIT_LIST_HEAD(&(tn->hashLink));
++ YINIT_LIST_HEAD(&tn->siblings);
++
++
++ /* Now make the directory sane */
++ if (dev->rootDir) {
++ tn->parent = dev->rootDir;
++ ylist_add(&(tn->siblings), &dev->rootDir->variant.directoryVariant.children);
++ }
+
+ /* Add it to the lost and found directory.
+ * NB Can't put root or lostNFound in lostNFound so
+ * check if lostNFound exists first
+ */
+- if (dev->lostNFoundDir) {
++ if (dev->lostNFoundDir)
+ yaffs_AddObjectToDirectory(dev->lostNFoundDir, tn);
+- }
++
++ tn->beingCreated = 0;
+ }
+
++ dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
++
+ return tn;
+ }
+
+-static yaffs_Object *yaffs_CreateFakeDirectory(yaffs_Device * dev, int number,
++static yaffs_Object *yaffs_CreateFakeDirectory(yaffs_Device *dev, int number,
+ __u32 mode)
+ {
+
+ yaffs_Object *obj =
+ yaffs_CreateNewObject(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (obj) {
+- obj->fake = 1; /* it is fake so it has no NAND presence... */
++ obj->fake = 1; /* it is fake so it might have no NAND presence... */
+ obj->renameAllowed = 0; /* ... and we're not allowed to rename it... */
+ obj->unlinkAllowed = 0; /* ... or unlink it */
+ obj->deleted = 0;
+ obj->unlinked = 0;
+ obj->yst_mode = mode;
+ obj->myDev = dev;
+- obj->chunkId = 0; /* Not a valid chunk. */
++ obj->hdrChunk = 0; /* Not a valid chunk. */
+ }
+
+ return obj;
+
+ }
+
+-static void yaffs_UnhashObject(yaffs_Object * tn)
++static void yaffs_UnhashObject(yaffs_Object *tn)
+ {
+ int bucket;
+ yaffs_Device *dev = tn->myDev;
+
+ /* If it is still linked into the bucket list, free from the list */
+- if (!list_empty(&tn->hashLink)) {
+- list_del_init(&tn->hashLink);
++ if (!ylist_empty(&tn->hashLink)) {
++ ylist_del_init(&tn->hashLink);
+ bucket = yaffs_HashFunction(tn->objectId);
+ dev->objectBucket[bucket].count--;
+ }
+-
+ }
+
+ /* FreeObject frees up a Object and puts it back on the free list */
+-static void yaffs_FreeObject(yaffs_Object * tn)
++static void yaffs_FreeObject(yaffs_Object *tn)
+ {
+-
+ yaffs_Device *dev = tn->myDev;
+
+-#ifdef __KERNEL__
++#ifdef __KERNEL__
++ T(YAFFS_TRACE_OS, (TSTR("FreeObject %p inode %p"TENDSTR), tn, tn->myInode));
++#endif
++
++ if (tn->parent)
++ YBUG();
++ if (!ylist_empty(&tn->siblings))
++ YBUG();
++
++
++#ifdef __KERNEL__
+ if (tn->myInode) {
+ /* We're still hooked up to a cached inode.
+ * Don't delete now, but mark for later deletion
+@@ -1963,24 +2000,28 @@ static void yaffs_FreeObject(yaffs_Objec
+
+ yaffs_UnhashObject(tn);
+
++#ifdef VALGRIND_TEST
++ YFREE(tn);
++#else
+ /* Link into the free list. */
+- tn->siblings.next = (struct list_head *)(dev->freeObjects);
++ tn->siblings.next = (struct ylist_head *)(dev->freeObjects);
+ dev->freeObjects = tn;
+ dev->nFreeObjects++;
++#endif
++ dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
+ }
+
+ #ifdef __KERNEL__
+
+-void yaffs_HandleDeferedFree(yaffs_Object * obj)
++void yaffs_HandleDeferedFree(yaffs_Object *obj)
+ {
+- if (obj->deferedFree) {
++ if (obj->deferedFree)
+ yaffs_FreeObject(obj);
+- }
+ }
+
+ #endif
+
+-static void yaffs_DeinitialiseObjects(yaffs_Device * dev)
++static void yaffs_DeinitialiseObjects(yaffs_Device *dev)
+ {
+ /* Free the list of allocated Objects */
+
+@@ -1998,7 +2039,7 @@ static void yaffs_DeinitialiseObjects(ya
+ dev->nFreeObjects = 0;
+ }
+
+-static void yaffs_InitialiseObjects(yaffs_Device * dev)
++static void yaffs_InitialiseObjects(yaffs_Device *dev)
+ {
+ int i;
+
+@@ -2007,15 +2048,14 @@ static void yaffs_InitialiseObjects(yaff
+ dev->nFreeObjects = 0;
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+- INIT_LIST_HEAD(&dev->objectBucket[i].list);
++ YINIT_LIST_HEAD(&dev->objectBucket[i].list);
+ dev->objectBucket[i].count = 0;
+ }
+-
+ }
+
+-static int yaffs_FindNiceObjectBucket(yaffs_Device * dev)
++static int yaffs_FindNiceObjectBucket(yaffs_Device *dev)
+ {
+- static int x = 0;
++ static int x;
+ int i;
+ int l = 999;
+ int lowest = 999999;
+@@ -2049,7 +2089,7 @@ static int yaffs_FindNiceObjectBucket(ya
+ return l;
+ }
+
+-static int yaffs_CreateNewObjectNumber(yaffs_Device * dev)
++static int yaffs_CreateNewObjectNumber(yaffs_Device *dev)
+ {
+ int bucket = yaffs_FindNiceObjectBucket(dev);
+
+@@ -2058,7 +2098,7 @@ static int yaffs_CreateNewObjectNumber(y
+ */
+
+ int found = 0;
+- struct list_head *i;
++ struct ylist_head *i;
+
+ __u32 n = (__u32) bucket;
+
+@@ -2068,41 +2108,38 @@ static int yaffs_CreateNewObjectNumber(y
+ found = 1;
+ n += YAFFS_NOBJECT_BUCKETS;
+ if (1 || dev->objectBucket[bucket].count > 0) {
+- list_for_each(i, &dev->objectBucket[bucket].list) {
++ ylist_for_each(i, &dev->objectBucket[bucket].list) {
+ /* If there is already one in the list */
+- if (i
+- && list_entry(i, yaffs_Object,
+- hashLink)->objectId == n) {
++ if (i && ylist_entry(i, yaffs_Object,
++ hashLink)->objectId == n) {
+ found = 0;
+ }
+ }
+ }
+ }
+
+-
+ return n;
+ }
+
+-static void yaffs_HashObject(yaffs_Object * in)
++static void yaffs_HashObject(yaffs_Object *in)
+ {
+ int bucket = yaffs_HashFunction(in->objectId);
+ yaffs_Device *dev = in->myDev;
+
+- list_add(&in->hashLink, &dev->objectBucket[bucket].list);
++ ylist_add(&in->hashLink, &dev->objectBucket[bucket].list);
+ dev->objectBucket[bucket].count++;
+-
+ }
+
+-yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device * dev, __u32 number)
++yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number)
+ {
+ int bucket = yaffs_HashFunction(number);
+- struct list_head *i;
++ struct ylist_head *i;
+ yaffs_Object *in;
+
+- list_for_each(i, &dev->objectBucket[bucket].list) {
++ ylist_for_each(i, &dev->objectBucket[bucket].list) {
+ /* Look if it is in the list */
+ if (i) {
+- in = list_entry(i, yaffs_Object, hashLink);
++ in = ylist_entry(i, yaffs_Object, hashLink);
+ if (in->objectId == number) {
+ #ifdef __KERNEL__
+ /* Don't tell the VFS about this one if it is defered free */
+@@ -2118,31 +2155,27 @@ yaffs_Object *yaffs_FindObjectByNumber(y
+ return NULL;
+ }
+
+-yaffs_Object *yaffs_CreateNewObject(yaffs_Device * dev, int number,
++yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number,
+ yaffs_ObjectType type)
+ {
+-
+ yaffs_Object *theObject;
+- yaffs_Tnode *tn;
++ yaffs_Tnode *tn = NULL;
+
+- if (number < 0) {
++ if (number < 0)
+ number = yaffs_CreateNewObjectNumber(dev);
+- }
+
+ theObject = yaffs_AllocateEmptyObject(dev);
+- if(!theObject)
++ if (!theObject)
+ return NULL;
+
+- if(type == YAFFS_OBJECT_TYPE_FILE){
++ if (type == YAFFS_OBJECT_TYPE_FILE) {
+ tn = yaffs_GetTnode(dev);
+- if(!tn){
++ if (!tn) {
+ yaffs_FreeObject(theObject);
+ return NULL;
+ }
+ }
+
+-
+-
+ if (theObject) {
+ theObject->fake = 0;
+ theObject->renameAllowed = 1;
+@@ -2171,8 +2204,8 @@ yaffs_Object *yaffs_CreateNewObject(yaff
+ theObject->variant.fileVariant.top = tn;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+- INIT_LIST_HEAD(&theObject->variant.directoryVariant.
+- children);
++ YINIT_LIST_HEAD(&theObject->variant.directoryVariant.
++ children);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+@@ -2188,32 +2221,30 @@ yaffs_Object *yaffs_CreateNewObject(yaff
+ return theObject;
+ }
+
+-static yaffs_Object *yaffs_FindOrCreateObjectByNumber(yaffs_Device * dev,
++static yaffs_Object *yaffs_FindOrCreateObjectByNumber(yaffs_Device *dev,
+ int number,
+ yaffs_ObjectType type)
+ {
+ yaffs_Object *theObject = NULL;
+
+- if (number > 0) {
++ if (number > 0)
+ theObject = yaffs_FindObjectByNumber(dev, number);
+- }
+
+- if (!theObject) {
++ if (!theObject)
+ theObject = yaffs_CreateNewObject(dev, number, type);
+- }
+
+ return theObject;
+
+ }
+
+
+-static YCHAR *yaffs_CloneString(const YCHAR * str)
++static YCHAR *yaffs_CloneString(const YCHAR *str)
+ {
+ YCHAR *newStr = NULL;
+
+ if (str && *str) {
+ newStr = YMALLOC((yaffs_strlen(str) + 1) * sizeof(YCHAR));
+- if(newStr)
++ if (newStr)
+ yaffs_strcpy(newStr, str);
+ }
+
+@@ -2229,29 +2260,31 @@ static YCHAR *yaffs_CloneString(const YC
+ */
+
+ static yaffs_Object *yaffs_MknodObject(yaffs_ObjectType type,
+- yaffs_Object * parent,
+- const YCHAR * name,
++ yaffs_Object *parent,
++ const YCHAR *name,
+ __u32 mode,
+ __u32 uid,
+ __u32 gid,
+- yaffs_Object * equivalentObject,
+- const YCHAR * aliasString, __u32 rdev)
++ yaffs_Object *equivalentObject,
++ const YCHAR *aliasString, __u32 rdev)
+ {
+ yaffs_Object *in;
+- YCHAR *str;
++ YCHAR *str = NULL;
+
+ yaffs_Device *dev = parent->myDev;
+
+ /* Check if the entry exists. If it does then fail the call since we don't want a dup.*/
+- if (yaffs_FindObjectByName(parent, name)) {
++ if (yaffs_FindObjectByName(parent, name))
+ return NULL;
+- }
+
+ in = yaffs_CreateNewObject(dev, -1, type);
+
+- if(type == YAFFS_OBJECT_TYPE_SYMLINK){
++ if (!in)
++ return YAFFS_FAIL;
++
++ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ str = yaffs_CloneString(aliasString);
+- if(!str){
++ if (!str) {
+ yaffs_FreeObject(in);
+ return NULL;
+ }
+@@ -2260,7 +2293,7 @@ static yaffs_Object *yaffs_MknodObject(y
+
+
+ if (in) {
+- in->chunkId = -1;
++ in->hdrChunk = 0;
+ in->valid = 1;
+ in->variantType = type;
+
+@@ -2293,10 +2326,10 @@ static yaffs_Object *yaffs_MknodObject(y
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardLinkVariant.equivalentObject =
+- equivalentObject;
++ equivalentObject;
+ in->variant.hardLinkVariant.equivalentObjectId =
+- equivalentObject->objectId;
+- list_add(&in->hardLinks, &equivalentObject->hardLinks);
++ equivalentObject->objectId;
++ ylist_add(&in->hardLinks, &equivalentObject->hardLinks);
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+@@ -2308,7 +2341,7 @@ static yaffs_Object *yaffs_MknodObject(y
+
+ if (yaffs_UpdateObjectHeader(in, name, 0, 0, 0) < 0) {
+ /* Could not create the object header, fail the creation */
+- yaffs_DestroyObject(in);
++ yaffs_DeleteObject(in);
+ in = NULL;
+ }
+
+@@ -2317,38 +2350,38 @@ static yaffs_Object *yaffs_MknodObject(y
+ return in;
+ }
+
+-yaffs_Object *yaffs_MknodFile(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid)
++yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid)
+ {
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+- uid, gid, NULL, NULL, 0);
++ uid, gid, NULL, NULL, 0);
+ }
+
+-yaffs_Object *yaffs_MknodDirectory(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid)
++yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid)
+ {
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+ mode, uid, gid, NULL, NULL, 0);
+ }
+
+-yaffs_Object *yaffs_MknodSpecial(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid, __u32 rdev)
++yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid, __u32 rdev)
+ {
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+ uid, gid, NULL, NULL, rdev);
+ }
+
+-yaffs_Object *yaffs_MknodSymLink(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid,
+- const YCHAR * alias)
++yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid,
++ const YCHAR *alias)
+ {
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+- uid, gid, NULL, alias, 0);
++ uid, gid, NULL, alias, 0);
+ }
+
+ /* yaffs_Link returns the object id of the equivalent object.*/
+-yaffs_Object *yaffs_Link(yaffs_Object * parent, const YCHAR * name,
+- yaffs_Object * equivalentObject)
++yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name,
++ yaffs_Object *equivalentObject)
+ {
+ /* Get the real object in case we were fed a hard link as an equivalent object */
+ equivalentObject = yaffs_GetEquivalentObject(equivalentObject);
+@@ -2363,33 +2396,31 @@ yaffs_Object *yaffs_Link(yaffs_Object *
+
+ }
+
+-static int yaffs_ChangeObjectName(yaffs_Object * obj, yaffs_Object * newDir,
+- const YCHAR * newName, int force, int shadows)
++static int yaffs_ChangeObjectName(yaffs_Object *obj, yaffs_Object *newDir,
++ const YCHAR *newName, int force, int shadows)
+ {
+ int unlinkOp;
+ int deleteOp;
+
+ yaffs_Object *existingTarget;
+
+- if (newDir == NULL) {
++ if (newDir == NULL)
+ newDir = obj->parent; /* use the old directory */
+- }
+
+ if (newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("tragendy: yaffs_ChangeObjectName: newDir is not a directory"
++ ("tragedy: yaffs_ChangeObjectName: newDir is not a directory"
+ TENDSTR)));
+ YBUG();
+ }
+
+ /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
+- if (obj->myDev->isYaffs2) {
++ if (obj->myDev->isYaffs2)
+ unlinkOp = (newDir == obj->myDev->unlinkedDir);
+- } else {
++ else
+ unlinkOp = (newDir == obj->myDev->unlinkedDir
+ && obj->variantType == YAFFS_OBJECT_TYPE_FILE);
+- }
+
+ deleteOp = (newDir == obj->myDev->deletedDir);
+
+@@ -2415,40 +2446,40 @@ static int yaffs_ChangeObjectName(yaffs_
+ obj->unlinked = 1;
+
+ /* If it is a deletion then we mark it as a shrink for gc purposes. */
+- if (yaffs_UpdateObjectHeader(obj, newName, 0, deleteOp, shadows)>= 0)
++ if (yaffs_UpdateObjectHeader(obj, newName, 0, deleteOp, shadows) >= 0)
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+ }
+
+-int yaffs_RenameObject(yaffs_Object * oldDir, const YCHAR * oldName,
+- yaffs_Object * newDir, const YCHAR * newName)
++int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName,
++ yaffs_Object *newDir, const YCHAR *newName)
+ {
+- yaffs_Object *obj;
+- yaffs_Object *existingTarget;
++ yaffs_Object *obj = NULL;
++ yaffs_Object *existingTarget = NULL;
+ int force = 0;
+
++
++ if (!oldDir || oldDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY)
++ YBUG();
++ if (!newDir || newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY)
++ YBUG();
++
+ #ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+ /* Special case for case insensitive systems (e.g. WinCE).
+ * While look-up is case insensitive, the name isn't.
+ * Therefore we might want to change x.txt to X.txt
+ */
+- if (oldDir == newDir && yaffs_strcmp(oldName, newName) == 0) {
++ if (oldDir == newDir && yaffs_strcmp(oldName, newName) == 0)
+ force = 1;
+- }
+ #endif
+
++ else if (yaffs_strlen(newName) > YAFFS_MAX_NAME_LENGTH)
++ /* ENAMETOOLONG */
++ return YAFFS_FAIL;
++
+ obj = yaffs_FindObjectByName(oldDir, oldName);
+- /* Check new name to long. */
+- if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK &&
+- yaffs_strlen(newName) > YAFFS_MAX_ALIAS_LENGTH)
+- /* ENAMETOOLONG */
+- return YAFFS_FAIL;
+- else if (obj->variantType != YAFFS_OBJECT_TYPE_SYMLINK &&
+- yaffs_strlen(newName) > YAFFS_MAX_NAME_LENGTH)
+- /* ENAMETOOLONG */
+- return YAFFS_FAIL;
+
+ if (obj && obj->renameAllowed) {
+
+@@ -2456,8 +2487,8 @@ int yaffs_RenameObject(yaffs_Object * ol
+
+ existingTarget = yaffs_FindObjectByName(newDir, newName);
+ if (existingTarget &&
+- existingTarget->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
+- !list_empty(&existingTarget->variant.directoryVariant.children)) {
++ existingTarget->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ !ylist_empty(&existingTarget->variant.directoryVariant.children)) {
+ /* There is a target that is a non-empty directory, so we fail */
+ return YAFFS_FAIL; /* EEXIST or ENOTEMPTY */
+ } else if (existingTarget && existingTarget != obj) {
+@@ -2465,7 +2496,7 @@ int yaffs_RenameObject(yaffs_Object * ol
+ * but only if it isn't the same object
+ */
+ yaffs_ChangeObjectName(obj, newDir, newName, force,
+- existingTarget->objectId);
++ existingTarget->objectId);
+ yaffs_UnlinkObject(existingTarget);
+ }
+
+@@ -2476,7 +2507,7 @@ int yaffs_RenameObject(yaffs_Object * ol
+
+ /*------------------------- Block Management and Page Allocation ----------------*/
+
+-static int yaffs_InitialiseBlocks(yaffs_Device * dev)
++static int yaffs_InitialiseBlocks(yaffs_Device *dev)
+ {
+ int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
+
+@@ -2487,23 +2518,20 @@ static int yaffs_InitialiseBlocks(yaffs_
+
+ /* If the first allocation strategy fails, try the alternate one */
+ dev->blockInfo = YMALLOC(nBlocks * sizeof(yaffs_BlockInfo));
+- if(!dev->blockInfo){
++ if (!dev->blockInfo) {
+ dev->blockInfo = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockInfo));
+ dev->blockInfoAlt = 1;
+- }
+- else
++ } else
+ dev->blockInfoAlt = 0;
+
+- if(dev->blockInfo){
+-
++ if (dev->blockInfo) {
+ /* Set up dynamic blockinfo stuff. */
+ dev->chunkBitmapStride = (dev->nChunksPerBlock + 7) / 8; /* round up bytes */
+ dev->chunkBits = YMALLOC(dev->chunkBitmapStride * nBlocks);
+- if(!dev->chunkBits){
++ if (!dev->chunkBits) {
+ dev->chunkBits = YMALLOC_ALT(dev->chunkBitmapStride * nBlocks);
+ dev->chunkBitsAlt = 1;
+- }
+- else
++ } else
+ dev->chunkBitsAlt = 0;
+ }
+
+@@ -2514,30 +2542,29 @@ static int yaffs_InitialiseBlocks(yaffs_
+ }
+
+ return YAFFS_FAIL;
+-
+ }
+
+-static void yaffs_DeinitialiseBlocks(yaffs_Device * dev)
++static void yaffs_DeinitialiseBlocks(yaffs_Device *dev)
+ {
+- if(dev->blockInfoAlt && dev->blockInfo)
++ if (dev->blockInfoAlt && dev->blockInfo)
+ YFREE_ALT(dev->blockInfo);
+- else if(dev->blockInfo)
++ else if (dev->blockInfo)
+ YFREE(dev->blockInfo);
+
+ dev->blockInfoAlt = 0;
+
+ dev->blockInfo = NULL;
+
+- if(dev->chunkBitsAlt && dev->chunkBits)
++ if (dev->chunkBitsAlt && dev->chunkBits)
+ YFREE_ALT(dev->chunkBits);
+- else if(dev->chunkBits)
++ else if (dev->chunkBits)
+ YFREE(dev->chunkBits);
+ dev->chunkBitsAlt = 0;
+ dev->chunkBits = NULL;
+ }
+
+-static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device * dev,
+- yaffs_BlockInfo * bi)
++static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device *dev,
++ yaffs_BlockInfo *bi)
+ {
+ int i;
+ __u32 seq;
+@@ -2556,7 +2583,7 @@ static int yaffs_BlockNotDisqualifiedFro
+ seq = dev->sequenceNumber;
+
+ for (i = dev->internalStartBlock; i <= dev->internalEndBlock;
+- i++) {
++ i++) {
+ b = yaffs_GetBlockInfo(dev, i);
+ if (b->blockState == YAFFS_BLOCK_STATE_FULL &&
+ (b->pagesInUse - b->softDeletions) <
+@@ -2571,38 +2598,36 @@ static int yaffs_BlockNotDisqualifiedFro
+ * discarded pages.
+ */
+ return (bi->sequenceNumber <= dev->oldestDirtySequence);
+-
+ }
+
+ /* FindDirtiestBlock is used to select the dirtiest block (or close enough)
+ * for garbage collection.
+ */
+
+-static int yaffs_FindBlockForGarbageCollection(yaffs_Device * dev,
+- int aggressive)
++static int yaffs_FindBlockForGarbageCollection(yaffs_Device *dev,
++ int aggressive)
+ {
+-
+ int b = dev->currentDirtyChecker;
+
+ int i;
+ int iterations;
+ int dirtiest = -1;
+ int pagesInUse = 0;
+- int prioritised=0;
++ int prioritised = 0;
+ yaffs_BlockInfo *bi;
+ int pendingPrioritisedExist = 0;
+
+ /* First let's see if we need to grab a prioritised block */
+- if(dev->hasPendingPrioritisedGCs){
+- for(i = dev->internalStartBlock; i < dev->internalEndBlock && !prioritised; i++){
++ if (dev->hasPendingPrioritisedGCs) {
++ for (i = dev->internalStartBlock; i < dev->internalEndBlock && !prioritised; i++) {
+
+ bi = yaffs_GetBlockInfo(dev, i);
+- //yaffs_VerifyBlock(dev,bi,i);
++ /* yaffs_VerifyBlock(dev,bi,i); */
+
+- if(bi->gcPrioritise) {
++ if (bi->gcPrioritise) {
+ pendingPrioritisedExist = 1;
+- if(bi->blockState == YAFFS_BLOCK_STATE_FULL &&
+- yaffs_BlockNotDisqualifiedFromGC(dev, bi)){
++ if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
++ yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
+ pagesInUse = (bi->pagesInUse - bi->softDeletions);
+ dirtiest = i;
+ prioritised = 1;
+@@ -2611,7 +2636,7 @@ static int yaffs_FindBlockForGarbageColl
+ }
+ }
+
+- if(!pendingPrioritisedExist) /* None found, so we can clear this */
++ if (!pendingPrioritisedExist) /* None found, so we can clear this */
+ dev->hasPendingPrioritisedGCs = 0;
+ }
+
+@@ -2623,31 +2648,28 @@ static int yaffs_FindBlockForGarbageColl
+
+ dev->nonAggressiveSkip--;
+
+- if (!aggressive && (dev->nonAggressiveSkip > 0)) {
++ if (!aggressive && (dev->nonAggressiveSkip > 0))
+ return -1;
+- }
+
+- if(!prioritised)
++ if (!prioritised)
+ pagesInUse =
+- (aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
++ (aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
+
+- if (aggressive) {
++ if (aggressive)
+ iterations =
+ dev->internalEndBlock - dev->internalStartBlock + 1;
+- } else {
++ else {
+ iterations =
+ dev->internalEndBlock - dev->internalStartBlock + 1;
+ iterations = iterations / 16;
+- if (iterations > 200) {
++ if (iterations > 200)
+ iterations = 200;
+- }
+ }
+
+ for (i = 0; i <= iterations && pagesInUse > 0 && !prioritised; i++) {
+ b++;
+- if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
++ if (b < dev->internalStartBlock || b > dev->internalEndBlock)
+ b = dev->internalStartBlock;
+- }
+
+ if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
+ T(YAFFS_TRACE_ERROR,
+@@ -2657,17 +2679,9 @@ static int yaffs_FindBlockForGarbageColl
+
+ bi = yaffs_GetBlockInfo(dev, b);
+
+-#if 0
+- if (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT) {
+- dirtiest = b;
+- pagesInUse = 0;
+- }
+- else
+-#endif
+-
+ if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
+- (bi->pagesInUse - bi->softDeletions) < pagesInUse &&
+- yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
++ (bi->pagesInUse - bi->softDeletions) < pagesInUse &&
++ yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
+ dirtiest = b;
+ pagesInUse = (bi->pagesInUse - bi->softDeletions);
+ }
+@@ -2678,19 +2692,18 @@ static int yaffs_FindBlockForGarbageColl
+ if (dirtiest > 0) {
+ T(YAFFS_TRACE_GC,
+ (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR), dirtiest,
+- dev->nChunksPerBlock - pagesInUse,prioritised));
++ dev->nChunksPerBlock - pagesInUse, prioritised));
+ }
+
+ dev->oldestDirtySequence = 0;
+
+- if (dirtiest > 0) {
++ if (dirtiest > 0)
+ dev->nonAggressiveSkip = 4;
+- }
+
+ return dirtiest;
+ }
+
+-static void yaffs_BlockBecameDirty(yaffs_Device * dev, int blockNo)
++static void yaffs_BlockBecameDirty(yaffs_Device *dev, int blockNo)
+ {
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockNo);
+
+@@ -2752,7 +2765,7 @@ static void yaffs_BlockBecameDirty(yaffs
+ }
+ }
+
+-static int yaffs_FindBlockForAllocation(yaffs_Device * dev)
++static int yaffs_FindBlockForAllocation(yaffs_Device *dev)
+ {
+ int i;
+
+@@ -2763,7 +2776,7 @@ static int yaffs_FindBlockForAllocation(
+ * Can't get space to gc
+ */
+ T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs tragedy: no more eraased blocks" TENDSTR)));
++ (TSTR("yaffs tragedy: no more erased blocks" TENDSTR)));
+
+ return -1;
+ }
+@@ -2794,31 +2807,74 @@ static int yaffs_FindBlockForAllocation(
+
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("yaffs tragedy: no more eraased blocks, but there should have been %d"
++ ("yaffs tragedy: no more erased blocks, but there should have been %d"
+ TENDSTR), dev->nErasedBlocks));
+
+ return -1;
+ }
+
+
+-// Check if there's space to allocate...
+-// Thinks.... do we need top make this ths same as yaffs_GetFreeChunks()?
+-static int yaffs_CheckSpaceForAllocation(yaffs_Device * dev)
++
++static int yaffs_CalcCheckpointBlocksRequired(yaffs_Device *dev)
++{
++ if (!dev->nCheckpointBlocksRequired &&
++ dev->isYaffs2) {
++ /* Not a valid value so recalculate */
++ int nBytes = 0;
++ int nBlocks;
++ int devBlocks = (dev->endBlock - dev->startBlock + 1);
++ int tnodeSize;
++
++ tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++ if (tnodeSize < sizeof(yaffs_Tnode))
++ tnodeSize = sizeof(yaffs_Tnode);
++
++ nBytes += sizeof(yaffs_CheckpointValidity);
++ nBytes += sizeof(yaffs_CheckpointDevice);
++ nBytes += devBlocks * sizeof(yaffs_BlockInfo);
++ nBytes += devBlocks * dev->chunkBitmapStride;
++ nBytes += (sizeof(yaffs_CheckpointObject) + sizeof(__u32)) * (dev->nObjectsCreated - dev->nFreeObjects);
++ nBytes += (tnodeSize + sizeof(__u32)) * (dev->nTnodesCreated - dev->nFreeTnodes);
++ nBytes += sizeof(yaffs_CheckpointValidity);
++ nBytes += sizeof(__u32); /* checksum */
++
++ /* Round up and add 2 blocks to allow for some bad blocks, so add 3 */
++
++ nBlocks = (nBytes/(dev->nDataBytesPerChunk * dev->nChunksPerBlock)) + 3;
++
++ dev->nCheckpointBlocksRequired = nBlocks;
++ }
++
++ return dev->nCheckpointBlocksRequired;
++}
++
++/*
++ * Check if there's space to allocate...
++ * Thinks.... do we need to make this the same as yaffs_GetFreeChunks()?
++ */
++static int yaffs_CheckSpaceForAllocation(yaffs_Device *dev)
+ {
+ int reservedChunks;
+ int reservedBlocks = dev->nReservedBlocks;
+ int checkpointBlocks;
+
+- checkpointBlocks = dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint;
+- if(checkpointBlocks < 0)
++ if (dev->isYaffs2) {
++ checkpointBlocks = yaffs_CalcCheckpointBlocksRequired(dev) -
++ dev->blocksInCheckpoint;
++ if (checkpointBlocks < 0)
++ checkpointBlocks = 0;
++ } else {
+ checkpointBlocks = 0;
++ }
+
+ reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->nChunksPerBlock);
+
+ return (dev->nFreeChunks > reservedChunks);
+ }
+
+-static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr)
++static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve,
++ yaffs_BlockInfo **blockUsedPtr)
+ {
+ int retVal;
+ yaffs_BlockInfo *bi;
+@@ -2835,7 +2891,7 @@ static int yaffs_AllocateChunk(yaffs_Dev
+ }
+
+ if (dev->nErasedBlocks < dev->nReservedBlocks
+- && dev->allocationPage == 0) {
++ && dev->allocationPage == 0) {
+ T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR)));
+ }
+
+@@ -2844,10 +2900,10 @@ static int yaffs_AllocateChunk(yaffs_Dev
+ bi = yaffs_GetBlockInfo(dev, dev->allocationBlock);
+
+ retVal = (dev->allocationBlock * dev->nChunksPerBlock) +
+- dev->allocationPage;
++ dev->allocationPage;
+ bi->pagesInUse++;
+ yaffs_SetChunkBit(dev, dev->allocationBlock,
+- dev->allocationPage);
++ dev->allocationPage);
+
+ dev->allocationPage++;
+
+@@ -2859,43 +2915,43 @@ static int yaffs_AllocateChunk(yaffs_Dev
+ dev->allocationBlock = -1;
+ }
+
+- if(blockUsedPtr)
++ if (blockUsedPtr)
+ *blockUsedPtr = bi;
+
+ return retVal;
+ }
+
+ T(YAFFS_TRACE_ERROR,
+- (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR)));
++ (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR)));
+
+ return -1;
+ }
+
+-static int yaffs_GetErasedChunks(yaffs_Device * dev)
++static int yaffs_GetErasedChunks(yaffs_Device *dev)
+ {
+ int n;
+
+ n = dev->nErasedBlocks * dev->nChunksPerBlock;
+
+- if (dev->allocationBlock > 0) {
++ if (dev->allocationBlock > 0)
+ n += (dev->nChunksPerBlock - dev->allocationPage);
+- }
+
+ return n;
+
+ }
+
+-static int yaffs_GarbageCollectBlock(yaffs_Device * dev, int block)
++static int yaffs_GarbageCollectBlock(yaffs_Device *dev, int block,
++ int wholeBlock)
+ {
+ int oldChunk;
+ int newChunk;
+- int chunkInBlock;
+ int markNAND;
+ int retVal = YAFFS_OK;
+ int cleanups = 0;
+ int i;
+ int isCheckpointBlock;
+ int matchingChunk;
++ int maxCopies;
+
+ int chunksBefore = yaffs_GetErasedChunks(dev);
+ int chunksAfter;
+@@ -2911,8 +2967,11 @@ static int yaffs_GarbageCollectBlock(yaf
+ bi->blockState = YAFFS_BLOCK_STATE_COLLECTING;
+
+ T(YAFFS_TRACE_TRACING,
+- (TSTR("Collecting block %d, in use %d, shrink %d, " TENDSTR), block,
+- bi->pagesInUse, bi->hasShrinkHeader));
++ (TSTR("Collecting block %d, in use %d, shrink %d, wholeBlock %d" TENDSTR),
++ block,
++ bi->pagesInUse,
++ bi->hasShrinkHeader,
++ wholeBlock));
+
+ /*yaffs_VerifyFreeChunks(dev); */
+
+@@ -2926,26 +2985,33 @@ static int yaffs_GarbageCollectBlock(yaf
+ dev->isDoingGC = 1;
+
+ if (isCheckpointBlock ||
+- !yaffs_StillSomeChunkBits(dev, block)) {
++ !yaffs_StillSomeChunkBits(dev, block)) {
+ T(YAFFS_TRACE_TRACING,
+- (TSTR
+- ("Collecting block %d that has no chunks in use" TENDSTR),
+- block));
++ (TSTR
++ ("Collecting block %d that has no chunks in use" TENDSTR),
++ block));
+ yaffs_BlockBecameDirty(dev, block);
+ } else {
+
+ __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
+
+- yaffs_VerifyBlock(dev,bi,block);
++ yaffs_VerifyBlock(dev, bi, block);
+
+- for (chunkInBlock = 0, oldChunk = block * dev->nChunksPerBlock;
+- chunkInBlock < dev->nChunksPerBlock
+- && yaffs_StillSomeChunkBits(dev, block);
+- chunkInBlock++, oldChunk++) {
+- if (yaffs_CheckChunkBit(dev, block, chunkInBlock)) {
++ maxCopies = (wholeBlock) ? dev->nChunksPerBlock : 10;
++ oldChunk = block * dev->nChunksPerBlock + dev->gcChunk;
++
++ for (/* init already done */;
++ retVal == YAFFS_OK &&
++ dev->gcChunk < dev->nChunksPerBlock &&
++ (bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) &&
++ maxCopies > 0;
++ dev->gcChunk++, oldChunk++) {
++ if (yaffs_CheckChunkBit(dev, block, dev->gcChunk)) {
+
+ /* This page is in use and might need to be copied off */
+
++ maxCopies--;
++
+ markNAND = 1;
+
+ yaffs_InitialiseTags(&tags);
+@@ -2959,22 +3025,22 @@ static int yaffs_GarbageCollectBlock(yaf
+
+ T(YAFFS_TRACE_GC_DETAIL,
+ (TSTR
+- ("Collecting page %d, %d %d %d " TENDSTR),
+- chunkInBlock, tags.objectId, tags.chunkId,
++ ("Collecting chunk in block %d, %d %d %d " TENDSTR),
++ dev->gcChunk, tags.objectId, tags.chunkId,
+ tags.byteCount));
+
+- if(object && !yaffs_SkipVerification(dev)){
+- if(tags.chunkId == 0)
+- matchingChunk = object->chunkId;
+- else if(object->softDeleted)
++ if (object && !yaffs_SkipVerification(dev)) {
++ if (tags.chunkId == 0)
++ matchingChunk = object->hdrChunk;
++ else if (object->softDeleted)
+ matchingChunk = oldChunk; /* Defeat the test */
+ else
+- matchingChunk = yaffs_FindChunkInFile(object,tags.chunkId,NULL);
++ matchingChunk = yaffs_FindChunkInFile(object, tags.chunkId, NULL);
+
+- if(oldChunk != matchingChunk)
++ if (oldChunk != matchingChunk)
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("gc: page in gc mismatch: %d %d %d %d"TENDSTR),
+- oldChunk,matchingChunk,tags.objectId, tags.chunkId));
++ oldChunk, matchingChunk, tags.objectId, tags.chunkId));
+
+ }
+
+@@ -2986,9 +3052,11 @@ static int yaffs_GarbageCollectBlock(yaf
+ tags.objectId, tags.chunkId, tags.byteCount));
+ }
+
+- if (object && object->deleted
+- && tags.chunkId != 0) {
+- /* Data chunk in a deleted file, throw it away
++ if (object &&
++ object->deleted &&
++ object->softDeleted &&
++ tags.chunkId != 0) {
++ /* Data chunk in a soft deleted file, throw it away
+ * It's a soft deleted data chunk,
+ * No need to copy this, just forget about it and
+ * fix up the object.
+@@ -3003,13 +3071,12 @@ static int yaffs_GarbageCollectBlock(yaf
+ cleanups++;
+ }
+ markNAND = 0;
+- } else if (0
+- /* Todo object && object->deleted && object->nDataChunks == 0 */
+- ) {
++ } else if (0) {
++ /* Todo object && object->deleted && object->nDataChunks == 0 */
+ /* Deleted object header with no data chunks.
+ * Can be discarded and the file deleted.
+ */
+- object->chunkId = 0;
++ object->hdrChunk = 0;
+ yaffs_FreeTnode(object->myDev,
+ object->variant.
+ fileVariant.top);
+@@ -3031,17 +3098,14 @@ static int yaffs_GarbageCollectBlock(yaf
+ * We need to nuke the shrinkheader flags first
+ * We no longer want the shrinkHeader flag since its work is done
+ * and if it is left in place it will mess up scanning.
+- * Also, clear out any shadowing stuff
+ */
+
+ yaffs_ObjectHeader *oh;
+ oh = (yaffs_ObjectHeader *)buffer;
+ oh->isShrink = 0;
+- oh->shadowsObject = -1;
+- tags.extraShadows = 0;
+ tags.extraIsShrinkHeader = 0;
+
+- yaffs_VerifyObjectHeader(object,oh,&tags,1);
++ yaffs_VerifyObjectHeader(object, oh, &tags, 1);
+ }
+
+ newChunk =
+@@ -3055,7 +3119,7 @@ static int yaffs_GarbageCollectBlock(yaf
+
+ if (tags.chunkId == 0) {
+ /* It's a header */
+- object->chunkId = newChunk;
++ object->hdrChunk = newChunk;
+ object->serial = tags.serialNumber;
+ } else {
+ /* It's a data chunk */
+@@ -3067,7 +3131,8 @@ static int yaffs_GarbageCollectBlock(yaf
+ }
+ }
+
+- yaffs_DeleteChunk(dev, oldChunk, markNAND, __LINE__);
++ if (retVal == YAFFS_OK)
++ yaffs_DeleteChunk(dev, oldChunk, markNAND, __LINE__);
+
+ }
+ }
+@@ -3098,18 +3163,25 @@ static int yaffs_GarbageCollectBlock(yaf
+
+ }
+
+- yaffs_VerifyCollectedBlock(dev,bi,block);
++ yaffs_VerifyCollectedBlock(dev, bi, block);
+
+- if (chunksBefore >= (chunksAfter = yaffs_GetErasedChunks(dev))) {
++ chunksAfter = yaffs_GetErasedChunks(dev);
++ if (chunksBefore >= chunksAfter) {
+ T(YAFFS_TRACE_GC,
+ (TSTR
+ ("gc did not increase free chunks before %d after %d"
+ TENDSTR), chunksBefore, chunksAfter));
+ }
+
++ /* If the gc completed then clear the current gcBlock so that we find another. */
++ if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING) {
++ dev->gcBlock = -1;
++ dev->gcChunk = 0;
++ }
++
+ dev->isDoingGC = 0;
+
+- return YAFFS_OK;
++ return retVal;
+ }
+
+ /* New garbage collector
+@@ -3121,7 +3193,7 @@ static int yaffs_GarbageCollectBlock(yaf
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ */
+-static int yaffs_CheckGarbageCollection(yaffs_Device * dev)
++static int yaffs_CheckGarbageCollection(yaffs_Device *dev)
+ {
+ int block;
+ int aggressive;
+@@ -3142,8 +3214,8 @@ static int yaffs_CheckGarbageCollection(
+ do {
+ maxTries++;
+
+- checkpointBlockAdjust = (dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint);
+- if(checkpointBlockAdjust < 0)
++ checkpointBlockAdjust = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint;
++ if (checkpointBlockAdjust < 0)
+ checkpointBlockAdjust = 0;
+
+ if (dev->nErasedBlocks < (dev->nReservedBlocks + checkpointBlockAdjust + 2)) {
+@@ -3154,20 +3226,24 @@ static int yaffs_CheckGarbageCollection(
+ aggressive = 0;
+ }
+
+- block = yaffs_FindBlockForGarbageCollection(dev, aggressive);
++ if (dev->gcBlock <= 0) {
++ dev->gcBlock = yaffs_FindBlockForGarbageCollection(dev, aggressive);
++ dev->gcChunk = 0;
++ }
++
++ block = dev->gcBlock;
+
+ if (block > 0) {
+ dev->garbageCollections++;
+- if (!aggressive) {
++ if (!aggressive)
+ dev->passiveGarbageCollections++;
+- }
+
+ T(YAFFS_TRACE_GC,
+ (TSTR
+ ("yaffs: GC erasedBlocks %d aggressive %d" TENDSTR),
+ dev->nErasedBlocks, aggressive));
+
+- gcOk = yaffs_GarbageCollectBlock(dev, block);
++ gcOk = yaffs_GarbageCollectBlock(dev, block, aggressive);
+ }
+
+ if (dev->nErasedBlocks < (dev->nReservedBlocks) && block > 0) {
+@@ -3176,15 +3252,16 @@ static int yaffs_CheckGarbageCollection(
+ ("yaffs: GC !!!no reclaim!!! erasedBlocks %d after try %d block %d"
+ TENDSTR), dev->nErasedBlocks, maxTries, block));
+ }
+- } while ((dev->nErasedBlocks < dev->nReservedBlocks) && (block > 0)
+- && (maxTries < 2));
++ } while ((dev->nErasedBlocks < dev->nReservedBlocks) &&
++ (block > 0) &&
++ (maxTries < 2));
+
+ return aggressive ? gcOk : YAFFS_OK;
+ }
+
+ /*------------------------- TAGS --------------------------------*/
+
+-static int yaffs_TagsMatch(const yaffs_ExtendedTags * tags, int objectId,
++static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId,
+ int chunkInObject)
+ {
+ return (tags->chunkId == chunkInObject &&
+@@ -3195,8 +3272,8 @@ static int yaffs_TagsMatch(const yaffs_E
+
+ /*-------------------- Data file manipulation -----------------*/
+
+-static int yaffs_FindChunkInFile(yaffs_Object * in, int chunkInInode,
+- yaffs_ExtendedTags * tags)
++static int yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode,
++ yaffs_ExtendedTags *tags)
+ {
+ /*Get the Tnode, then get the level 0 offset chunk offset */
+ yaffs_Tnode *tn;
+@@ -3214,7 +3291,7 @@ static int yaffs_FindChunkInFile(yaffs_O
+ tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
+
+ if (tn) {
+- theChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
++ theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+
+ retVal =
+ yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
+@@ -3223,8 +3300,8 @@ static int yaffs_FindChunkInFile(yaffs_O
+ return retVal;
+ }
+
+-static int yaffs_FindAndDeleteChunkInFile(yaffs_Object * in, int chunkInInode,
+- yaffs_ExtendedTags * tags)
++static int yaffs_FindAndDeleteChunkInFile(yaffs_Object *in, int chunkInInode,
++ yaffs_ExtendedTags *tags)
+ {
+ /* Get the Tnode, then get the level 0 offset chunk offset */
+ yaffs_Tnode *tn;
+@@ -3243,29 +3320,23 @@ static int yaffs_FindAndDeleteChunkInFil
+
+ if (tn) {
+
+- theChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
++ theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+
+ retVal =
+ yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
+ chunkInInode);
+
+ /* Delete the entry in the filestructure (if found) */
+- if (retVal != -1) {
+- yaffs_PutLevel0Tnode(dev,tn,chunkInInode,0);
+- }
+- } else {
+- /*T(("No level 0 found for %d\n", chunkInInode)); */
++ if (retVal != -1)
++ yaffs_PutLevel0Tnode(dev, tn, chunkInInode, 0);
+ }
+
+- if (retVal == -1) {
+- /* T(("Could not find %d to delete\n",chunkInInode)); */
+- }
+ return retVal;
+ }
+
+ #ifdef YAFFS_PARANOID
+
+-static int yaffs_CheckFileSanity(yaffs_Object * in)
++static int yaffs_CheckFileSanity(yaffs_Object *in)
+ {
+ int chunk;
+ int nChunks;
+@@ -3278,10 +3349,8 @@ static int yaffs_CheckFileSanity(yaffs_O
+ int theChunk;
+ int chunkDeleted;
+
+- if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
+- /* T(("Object not a file\n")); */
++ if (in->variantType != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+- }
+
+ objId = in->objectId;
+ fSize = in->variant.fileVariant.fileSize;
+@@ -3294,7 +3363,7 @@ static int yaffs_CheckFileSanity(yaffs_O
+
+ if (tn) {
+
+- theChunk = yaffs_GetChunkGroupBase(dev,tn,chunk);
++ theChunk = yaffs_GetChunkGroupBase(dev, tn, chunk);
+
+ if (yaffs_CheckChunkBits
+ (dev, theChunk / dev->nChunksPerBlock,
+@@ -3323,7 +3392,7 @@ static int yaffs_CheckFileSanity(yaffs_O
+
+ #endif
+
+-static int yaffs_PutChunkIntoFile(yaffs_Object * in, int chunkInInode,
++static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode,
+ int chunkInNAND, int inScan)
+ {
+ /* NB inScan is zero unless scanning.
+@@ -3358,11 +3427,10 @@ static int yaffs_PutChunkIntoFile(yaffs_
+ &in->variant.fileVariant,
+ chunkInInode,
+ NULL);
+- if (!tn) {
++ if (!tn)
+ return YAFFS_FAIL;
+- }
+
+- existingChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
++ existingChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+
+ if (inScan != 0) {
+ /* If we're scanning then we need to test for duplicates
+@@ -3374,7 +3442,7 @@ static int yaffs_PutChunkIntoFile(yaffs_
+ * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
+ */
+
+- if (existingChunk != 0) {
++ if (existingChunk > 0) {
+ /* NB Right now existing chunk will not be real chunkId if the device >= 32MB
+ * thus we have to do a FindChunkInFile to get the real chunk id.
+ *
+@@ -3411,8 +3479,10 @@ static int yaffs_PutChunkIntoFile(yaffs_
+ * not be loaded during a scan
+ */
+
+- newSerial = newTags.serialNumber;
+- existingSerial = existingTags.serialNumber;
++ if (inScan > 0) {
++ newSerial = newTags.serialNumber;
++ existingSerial = existingTags.serialNumber;
++ }
+
+ if ((inScan > 0) &&
+ (in->myDev->isYaffs2 ||
+@@ -3437,24 +3507,23 @@ static int yaffs_PutChunkIntoFile(yaffs_
+
+ }
+
+- if (existingChunk == 0) {
++ if (existingChunk == 0)
+ in->nDataChunks++;
+- }
+
+- yaffs_PutLevel0Tnode(dev,tn,chunkInInode,chunkInNAND);
++ yaffs_PutLevel0Tnode(dev, tn, chunkInInode, chunkInNAND);
+
+ return YAFFS_OK;
+ }
+
+-static int yaffs_ReadChunkDataFromObject(yaffs_Object * in, int chunkInInode,
+- __u8 * buffer)
++static int yaffs_ReadChunkDataFromObject(yaffs_Object *in, int chunkInInode,
++ __u8 *buffer)
+ {
+ int chunkInNAND = yaffs_FindChunkInFile(in, chunkInInode, NULL);
+
+- if (chunkInNAND >= 0) {
++ if (chunkInNAND >= 0)
+ return yaffs_ReadChunkWithTagsFromNAND(in->myDev, chunkInNAND,
+- buffer,NULL);
+- } else {
++ buffer, NULL);
++ else {
+ T(YAFFS_TRACE_NANDACCESS,
+ (TSTR("Chunk %d not found zero instead" TENDSTR),
+ chunkInNAND));
+@@ -3465,7 +3534,7 @@ static int yaffs_ReadChunkDataFromObject
+
+ }
+
+-void yaffs_DeleteChunk(yaffs_Device * dev, int chunkId, int markNAND, int lyn)
++void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn)
+ {
+ int block;
+ int page;
+@@ -3475,16 +3544,15 @@ void yaffs_DeleteChunk(yaffs_Device * de
+ if (chunkId <= 0)
+ return;
+
+-
+ dev->nDeletions++;
+ block = chunkId / dev->nChunksPerBlock;
+ page = chunkId % dev->nChunksPerBlock;
+
+
+- if(!yaffs_CheckChunkBit(dev,block,page))
++ if (!yaffs_CheckChunkBit(dev, block, page))
+ T(YAFFS_TRACE_VERIFY,
+- (TSTR("Deleting invalid chunk %d"TENDSTR),
+- chunkId));
++ (TSTR("Deleting invalid chunk %d"TENDSTR),
++ chunkId));
+
+ bi = yaffs_GetBlockInfo(dev, block);
+
+@@ -3524,14 +3592,12 @@ void yaffs_DeleteChunk(yaffs_Device * de
+ yaffs_BlockBecameDirty(dev, block);
+ }
+
+- } else {
+- /* T(("Bad news deleting chunk %d\n",chunkId)); */
+ }
+
+ }
+
+-static int yaffs_WriteChunkDataToObject(yaffs_Object * in, int chunkInInode,
+- const __u8 * buffer, int nBytes,
++static int yaffs_WriteChunkDataToObject(yaffs_Object *in, int chunkInInode,
++ const __u8 *buffer, int nBytes,
+ int useReserve)
+ {
+ /* Find old chunk Need to do this to get serial number
+@@ -3561,6 +3627,12 @@ static int yaffs_WriteChunkDataToObject(
+ (prevChunkId >= 0) ? prevTags.serialNumber + 1 : 1;
+ newTags.byteCount = nBytes;
+
++ if (nBytes < 1 || nBytes > dev->totalBytesPerChunk) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("Writing %d bytes to chunk!!!!!!!!!" TENDSTR), nBytes));
++ YBUG();
++ }
++
+ newChunkId =
+ yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
+ useReserve);
+@@ -3568,11 +3640,9 @@ static int yaffs_WriteChunkDataToObject(
+ if (newChunkId >= 0) {
+ yaffs_PutChunkIntoFile(in, chunkInInode, newChunkId, 0);
+
+- if (prevChunkId >= 0) {
++ if (prevChunkId >= 0)
+ yaffs_DeleteChunk(dev, prevChunkId, 1, __LINE__);
+
+- }
+-
+ yaffs_CheckFileSanity(in);
+ }
+ return newChunkId;
+@@ -3582,7 +3652,7 @@ static int yaffs_WriteChunkDataToObject(
+ /* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ */
+-int yaffs_UpdateObjectHeader(yaffs_Object * in, const YCHAR * name, int force,
++int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name, int force,
+ int isShrink, int shadows)
+ {
+
+@@ -3603,9 +3673,12 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+
+ yaffs_ObjectHeader *oh = NULL;
+
+- yaffs_strcpy(oldName,"silly old name");
++ yaffs_strcpy(oldName, _Y("silly old name"));
+
+- if (!in->fake || force) {
++
++ if (!in->fake ||
++ in == dev->rootDir || /* The rootDir should also be saved */
++ force) {
+
+ yaffs_CheckGarbageCollection(dev);
+ yaffs_CheckObjectDetailsLoaded(in);
+@@ -3613,13 +3686,13 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+ buffer = yaffs_GetTempBuffer(in->myDev, __LINE__);
+ oh = (yaffs_ObjectHeader *) buffer;
+
+- prevChunkId = in->chunkId;
++ prevChunkId = in->hdrChunk;
+
+- if (prevChunkId >= 0) {
++ if (prevChunkId > 0) {
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, prevChunkId,
+ buffer, &oldTags);
+
+- yaffs_VerifyObjectHeader(in,oh,&oldTags,0);
++ yaffs_VerifyObjectHeader(in, oh, &oldTags, 0);
+
+ memcpy(oldName, oh->name, sizeof(oh->name));
+ }
+@@ -3628,7 +3701,7 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+
+ oh->type = in->variantType;
+ oh->yst_mode = in->yst_mode;
+- oh->shadowsObject = shadows;
++ oh->shadowsObject = oh->inbandShadowsObject = shadows;
+
+ #ifdef CONFIG_YAFFS_WINCE
+ oh->win_atime[0] = in->win_atime[0];
+@@ -3645,20 +3718,18 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+ oh->yst_ctime = in->yst_ctime;
+ oh->yst_rdev = in->yst_rdev;
+ #endif
+- if (in->parent) {
++ if (in->parent)
+ oh->parentObjectId = in->parent->objectId;
+- } else {
++ else
+ oh->parentObjectId = 0;
+- }
+
+ if (name && *name) {
+ memset(oh->name, 0, sizeof(oh->name));
+ yaffs_strncpy(oh->name, name, YAFFS_MAX_NAME_LENGTH);
+- } else if (prevChunkId>=0) {
++ } else if (prevChunkId >= 0)
+ memcpy(oh->name, oldName, sizeof(oh->name));
+- } else {
++ else
+ memset(oh->name, 0, sizeof(oh->name));
+- }
+
+ oh->isShrink = isShrink;
+
+@@ -3708,7 +3779,7 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+ newTags.extraShadows = (oh->shadowsObject > 0) ? 1 : 0;
+ newTags.extraObjectType = in->variantType;
+
+- yaffs_VerifyObjectHeader(in,oh,&newTags,1);
++ yaffs_VerifyObjectHeader(in, oh, &newTags, 1);
+
+ /* Create new chunk in NAND */
+ newChunkId =
+@@ -3717,20 +3788,20 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+
+ if (newChunkId >= 0) {
+
+- in->chunkId = newChunkId;
++ in->hdrChunk = newChunkId;
+
+ if (prevChunkId >= 0) {
+ yaffs_DeleteChunk(dev, prevChunkId, 1,
+ __LINE__);
+ }
+
+- if(!yaffs_ObjectHasCachedWriteData(in))
++ if (!yaffs_ObjectHasCachedWriteData(in))
+ in->dirty = 0;
+
+ /* If this was a shrink, then mark the block that the chunk lives on */
+ if (isShrink) {
+ bi = yaffs_GetBlockInfo(in->myDev,
+- newChunkId /in->myDev-> nChunksPerBlock);
++ newChunkId / in->myDev->nChunksPerBlock);
+ bi->hasShrinkHeader = 1;
+ }
+
+@@ -3766,7 +3837,7 @@ static int yaffs_ObjectHasCachedWriteDat
+ yaffs_ChunkCache *cache;
+ int nCaches = obj->myDev->nShortOpCaches;
+
+- for(i = 0; i < nCaches; i++){
++ for (i = 0; i < nCaches; i++) {
+ cache = &dev->srCache[i];
+ if (cache->object == obj &&
+ cache->dirty)
+@@ -3777,7 +3848,7 @@ static int yaffs_ObjectHasCachedWriteDat
+ }
+
+
+-static void yaffs_FlushFilesChunkCache(yaffs_Object * obj)
++static void yaffs_FlushFilesChunkCache(yaffs_Object *obj)
+ {
+ yaffs_Device *dev = obj->myDev;
+ int lowest = -99; /* Stop compiler whining. */
+@@ -3844,16 +3915,16 @@ void yaffs_FlushEntireDeviceCache(yaffs_
+ */
+ do {
+ obj = NULL;
+- for( i = 0; i < nCaches && !obj; i++) {
++ for (i = 0; i < nCaches && !obj; i++) {
+ if (dev->srCache[i].object &&
+ dev->srCache[i].dirty)
+ obj = dev->srCache[i].object;
+
+ }
+- if(obj)
++ if (obj)
+ yaffs_FlushFilesChunkCache(obj);
+
+- } while(obj);
++ } while (obj);
+
+ }
+
+@@ -3863,41 +3934,21 @@ void yaffs_FlushEntireDeviceCache(yaffs_
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one...., flush and look again.
+ */
+-static yaffs_ChunkCache *yaffs_GrabChunkCacheWorker(yaffs_Device * dev)
++static yaffs_ChunkCache *yaffs_GrabChunkCacheWorker(yaffs_Device *dev)
+ {
+ int i;
+- int usage;
+- int theOne;
+
+ if (dev->nShortOpCaches > 0) {
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+ if (!dev->srCache[i].object)
+ return &dev->srCache[i];
+ }
++ }
+
+- return NULL;
++ return NULL;
++}
+
+- theOne = -1;
+- usage = 0; /* just to stop the compiler grizzling */
+-
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (!dev->srCache[i].dirty &&
+- ((dev->srCache[i].lastUse < usage && theOne >= 0) ||
+- theOne < 0)) {
+- usage = dev->srCache[i].lastUse;
+- theOne = i;
+- }
+- }
+-
+-
+- return theOne >= 0 ? &dev->srCache[theOne] : NULL;
+- } else {
+- return NULL;
+- }
+-
+-}
+-
+-static yaffs_ChunkCache *yaffs_GrabChunkCache(yaffs_Device * dev)
++static yaffs_ChunkCache *yaffs_GrabChunkCache(yaffs_Device *dev)
+ {
+ yaffs_ChunkCache *cache;
+ yaffs_Object *theObj;
+@@ -3927,8 +3978,7 @@ static yaffs_ChunkCache *yaffs_GrabChunk
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+ if (dev->srCache[i].object &&
+ !dev->srCache[i].locked &&
+- (dev->srCache[i].lastUse < usage || !cache))
+- {
++ (dev->srCache[i].lastUse < usage || !cache)) {
+ usage = dev->srCache[i].lastUse;
+ theObj = dev->srCache[i].object;
+ cache = &dev->srCache[i];
+@@ -3950,7 +4000,7 @@ static yaffs_ChunkCache *yaffs_GrabChunk
+ }
+
+ /* Find a cached chunk */
+-static yaffs_ChunkCache *yaffs_FindChunkCache(const yaffs_Object * obj,
++static yaffs_ChunkCache *yaffs_FindChunkCache(const yaffs_Object *obj,
+ int chunkId)
+ {
+ yaffs_Device *dev = obj->myDev;
+@@ -3969,7 +4019,7 @@ static yaffs_ChunkCache *yaffs_FindChunk
+ }
+
+ /* Mark the chunk for the least recently used algorithym */
+-static void yaffs_UseChunkCache(yaffs_Device * dev, yaffs_ChunkCache * cache,
++static void yaffs_UseChunkCache(yaffs_Device *dev, yaffs_ChunkCache *cache,
+ int isAWrite)
+ {
+
+@@ -3977,9 +4027,9 @@ static void yaffs_UseChunkCache(yaffs_De
+ if (dev->srLastUse < 0 || dev->srLastUse > 100000000) {
+ /* Reset the cache usages */
+ int i;
+- for (i = 1; i < dev->nShortOpCaches; i++) {
++ for (i = 1; i < dev->nShortOpCaches; i++)
+ dev->srCache[i].lastUse = 0;
+- }
++
+ dev->srLastUse = 0;
+ }
+
+@@ -3987,9 +4037,8 @@ static void yaffs_UseChunkCache(yaffs_De
+
+ cache->lastUse = dev->srLastUse;
+
+- if (isAWrite) {
++ if (isAWrite)
+ cache->dirty = 1;
+- }
+ }
+ }
+
+@@ -3997,21 +4046,20 @@ static void yaffs_UseChunkCache(yaffs_De
+ * Do this when a whole page gets written,
+ * ie the short cache for this page is no longer valid.
+ */
+-static void yaffs_InvalidateChunkCache(yaffs_Object * object, int chunkId)
++static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId)
+ {
+ if (object->myDev->nShortOpCaches > 0) {
+ yaffs_ChunkCache *cache = yaffs_FindChunkCache(object, chunkId);
+
+- if (cache) {
++ if (cache)
+ cache->object = NULL;
+- }
+ }
+ }
+
+ /* Invalidate all the cache pages associated with this object
+ * Do this whenever ther file is deleted or resized.
+ */
+-static void yaffs_InvalidateWholeChunkCache(yaffs_Object * in)
++static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in)
+ {
+ int i;
+ yaffs_Device *dev = in->myDev;
+@@ -4019,9 +4067,8 @@ static void yaffs_InvalidateWholeChunkCa
+ if (dev->nShortOpCaches > 0) {
+ /* Invalidate it. */
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].object == in) {
++ if (dev->srCache[i].object == in)
+ dev->srCache[i].object = NULL;
+- }
+ }
+ }
+ }
+@@ -4029,18 +4076,18 @@ static void yaffs_InvalidateWholeChunkCa
+ /*--------------------- Checkpointing --------------------*/
+
+
+-static int yaffs_WriteCheckpointValidityMarker(yaffs_Device *dev,int head)
++static int yaffs_WriteCheckpointValidityMarker(yaffs_Device *dev, int head)
+ {
+ yaffs_CheckpointValidity cp;
+
+- memset(&cp,0,sizeof(cp));
++ memset(&cp, 0, sizeof(cp));
+
+ cp.structType = sizeof(cp);
+ cp.magic = YAFFS_MAGIC;
+ cp.version = YAFFS_CHECKPOINT_VERSION;
+ cp.head = (head) ? 1 : 0;
+
+- return (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp))?
++ return (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp)) ?
+ 1 : 0;
+ }
+
+@@ -4049,9 +4096,9 @@ static int yaffs_ReadCheckpointValidityM
+ yaffs_CheckpointValidity cp;
+ int ok;
+
+- ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
++ ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+- if(ok)
++ if (ok)
+ ok = (cp.structType == sizeof(cp)) &&
+ (cp.magic == YAFFS_MAGIC) &&
+ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+@@ -4100,21 +4147,21 @@ static int yaffs_WriteCheckpointDevice(y
+ int ok;
+
+ /* Write device runtime values*/
+- yaffs_DeviceToCheckpointDevice(&cp,dev);
++ yaffs_DeviceToCheckpointDevice(&cp, dev);
+ cp.structType = sizeof(cp);
+
+- ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
++ ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ /* Write block info */
+- if(ok) {
++ if (ok) {
+ nBytes = nBlocks * sizeof(yaffs_BlockInfo);
+- ok = (yaffs_CheckpointWrite(dev,dev->blockInfo,nBytes) == nBytes);
++ ok = (yaffs_CheckpointWrite(dev, dev->blockInfo, nBytes) == nBytes);
+ }
+
+ /* Write chunk bits */
+- if(ok) {
++ if (ok) {
+ nBytes = nBlocks * dev->chunkBitmapStride;
+- ok = (yaffs_CheckpointWrite(dev,dev->chunkBits,nBytes) == nBytes);
++ ok = (yaffs_CheckpointWrite(dev, dev->chunkBits, nBytes) == nBytes);
+ }
+ return ok ? 1 : 0;
+
+@@ -4128,25 +4175,25 @@ static int yaffs_ReadCheckpointDevice(ya
+
+ int ok;
+
+- ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
+- if(!ok)
++ ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (!ok)
+ return 0;
+
+- if(cp.structType != sizeof(cp))
++ if (cp.structType != sizeof(cp))
+ return 0;
+
+
+- yaffs_CheckpointDeviceToDevice(dev,&cp);
++ yaffs_CheckpointDeviceToDevice(dev, &cp);
+
+ nBytes = nBlocks * sizeof(yaffs_BlockInfo);
+
+- ok = (yaffs_CheckpointRead(dev,dev->blockInfo,nBytes) == nBytes);
++ ok = (yaffs_CheckpointRead(dev, dev->blockInfo, nBytes) == nBytes);
+
+- if(!ok)
++ if (!ok)
+ return 0;
+ nBytes = nBlocks * dev->chunkBitmapStride;
+
+- ok = (yaffs_CheckpointRead(dev,dev->chunkBits,nBytes) == nBytes);
++ ok = (yaffs_CheckpointRead(dev, dev->chunkBits, nBytes) == nBytes);
+
+ return ok ? 1 : 0;
+ }
+@@ -4157,7 +4204,7 @@ static void yaffs_ObjectToCheckpointObje
+
+ cp->objectId = obj->objectId;
+ cp->parentId = (obj->parent) ? obj->parent->objectId : 0;
+- cp->chunkId = obj->chunkId;
++ cp->hdrChunk = obj->hdrChunk;
+ cp->variantType = obj->variantType;
+ cp->deleted = obj->deleted;
+ cp->softDeleted = obj->softDeleted;
+@@ -4168,20 +4215,28 @@ static void yaffs_ObjectToCheckpointObje
+ cp->serial = obj->serial;
+ cp->nDataChunks = obj->nDataChunks;
+
+- if(obj->variantType == YAFFS_OBJECT_TYPE_FILE)
++ if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+ cp->fileSizeOrEquivalentObjectId = obj->variant.fileVariant.fileSize;
+- else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
++ else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
+ cp->fileSizeOrEquivalentObjectId = obj->variant.hardLinkVariant.equivalentObjectId;
+ }
+
+-static void yaffs_CheckpointObjectToObject( yaffs_Object *obj,yaffs_CheckpointObject *cp)
++static int yaffs_CheckpointObjectToObject(yaffs_Object *obj, yaffs_CheckpointObject *cp)
+ {
+
+ yaffs_Object *parent;
+
++ if (obj->variantType != cp->variantType) {
++ T(YAFFS_TRACE_ERROR, (TSTR("Checkpoint read object %d type %d "
++ TCONT("chunk %d does not match existing object type %d")
++ TENDSTR), cp->objectId, cp->variantType, cp->hdrChunk,
++ obj->variantType));
++ return 0;
++ }
++
+ obj->objectId = cp->objectId;
+
+- if(cp->parentId)
++ if (cp->parentId)
+ parent = yaffs_FindOrCreateObjectByNumber(
+ obj->myDev,
+ cp->parentId,
+@@ -4189,10 +4244,19 @@ static void yaffs_CheckpointObjectToObje
+ else
+ parent = NULL;
+
+- if(parent)
++ if (parent) {
++ if (parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Checkpoint read object %d parent %d type %d"
++ TCONT(" chunk %d Parent type, %d, not directory")
++ TENDSTR),
++ cp->objectId, cp->parentId, cp->variantType,
++ cp->hdrChunk, parent->variantType));
++ return 0;
++ }
+ yaffs_AddObjectToDirectory(parent, obj);
++ }
+
+- obj->chunkId = cp->chunkId;
++ obj->hdrChunk = cp->hdrChunk;
+ obj->variantType = cp->variantType;
+ obj->deleted = cp->deleted;
+ obj->softDeleted = cp->softDeleted;
+@@ -4203,29 +4267,34 @@ static void yaffs_CheckpointObjectToObje
+ obj->serial = cp->serial;
+ obj->nDataChunks = cp->nDataChunks;
+
+- if(obj->variantType == YAFFS_OBJECT_TYPE_FILE)
++ if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+ obj->variant.fileVariant.fileSize = cp->fileSizeOrEquivalentObjectId;
+- else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
++ else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
+ obj->variant.hardLinkVariant.equivalentObjectId = cp->fileSizeOrEquivalentObjectId;
+
+- if(obj->objectId >= YAFFS_NOBJECT_BUCKETS)
++ if (obj->hdrChunk > 0)
+ obj->lazyLoaded = 1;
++ return 1;
+ }
+
+
+
+-static int yaffs_CheckpointTnodeWorker(yaffs_Object * in, yaffs_Tnode * tn,
+- __u32 level, int chunkOffset)
++static int yaffs_CheckpointTnodeWorker(yaffs_Object *in, yaffs_Tnode *tn,
++ __u32 level, int chunkOffset)
+ {
+ int i;
+ yaffs_Device *dev = in->myDev;
+ int ok = 1;
+- int nTnodeBytes = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++ int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++ if (tnodeSize < sizeof(yaffs_Tnode))
++ tnodeSize = sizeof(yaffs_Tnode);
++
+
+ if (tn) {
+ if (level > 0) {
+
+- for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++){
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+ if (tn->internal[i]) {
+ ok = yaffs_CheckpointTnodeWorker(in,
+ tn->internal[i],
+@@ -4235,10 +4304,9 @@ static int yaffs_CheckpointTnodeWorker(y
+ }
+ } else if (level == 0) {
+ __u32 baseOffset = chunkOffset << YAFFS_TNODES_LEVEL0_BITS;
+- /* printf("write tnode at %d\n",baseOffset); */
+- ok = (yaffs_CheckpointWrite(dev,&baseOffset,sizeof(baseOffset)) == sizeof(baseOffset));
+- if(ok)
+- ok = (yaffs_CheckpointWrite(dev,tn,nTnodeBytes) == nTnodeBytes);
++ ok = (yaffs_CheckpointWrite(dev, &baseOffset, sizeof(baseOffset)) == sizeof(baseOffset));
++ if (ok)
++ ok = (yaffs_CheckpointWrite(dev, tn, tnodeSize) == tnodeSize);
+ }
+ }
+
+@@ -4251,13 +4319,13 @@ static int yaffs_WriteCheckpointTnodes(y
+ __u32 endMarker = ~0;
+ int ok = 1;
+
+- if(obj->variantType == YAFFS_OBJECT_TYPE_FILE){
++ if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs_CheckpointTnodeWorker(obj,
+ obj->variant.fileVariant.top,
+ obj->variant.fileVariant.topLevel,
+ 0);
+- if(ok)
+- ok = (yaffs_CheckpointWrite(obj->myDev,&endMarker,sizeof(endMarker)) ==
++ if (ok)
++ ok = (yaffs_CheckpointWrite(obj->myDev, &endMarker, sizeof(endMarker)) ==
+ sizeof(endMarker));
+ }
+
+@@ -4272,38 +4340,38 @@ static int yaffs_ReadCheckpointTnodes(ya
+ yaffs_FileStructure *fileStructPtr = &obj->variant.fileVariant;
+ yaffs_Tnode *tn;
+ int nread = 0;
++ int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
+- ok = (yaffs_CheckpointRead(dev,&baseChunk,sizeof(baseChunk)) == sizeof(baseChunk));
++ if (tnodeSize < sizeof(yaffs_Tnode))
++ tnodeSize = sizeof(yaffs_Tnode);
+
+- while(ok && (~baseChunk)){
++ ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++
++ while (ok && (~baseChunk)) {
+ nread++;
+ /* Read level 0 tnode */
+
+
+- /* printf("read tnode at %d\n",baseChunk); */
+ tn = yaffs_GetTnodeRaw(dev);
+- if(tn)
+- ok = (yaffs_CheckpointRead(dev,tn,(dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8) ==
+- (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
++ if (tn)
++ ok = (yaffs_CheckpointRead(dev, tn, tnodeSize) == tnodeSize);
+ else
+ ok = 0;
+
+- if(tn && ok){
++ if (tn && ok)
+ ok = yaffs_AddOrFindLevel0Tnode(dev,
+- fileStructPtr,
+- baseChunk,
+- tn) ? 1 : 0;
++ fileStructPtr,
++ baseChunk,
++ tn) ? 1 : 0;
+
+- }
+-
+- if(ok)
+- ok = (yaffs_CheckpointRead(dev,&baseChunk,sizeof(baseChunk)) == sizeof(baseChunk));
++ if (ok)
++ ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
+
+ }
+
+- T(YAFFS_TRACE_CHECKPOINT,(
++ T(YAFFS_TRACE_CHECKPOINT, (
+ TSTR("Checkpoint read tnodes %d records, last %d. ok %d" TENDSTR),
+- nread,baseChunk,ok));
++ nread, baseChunk, ok));
+
+ return ok ? 1 : 0;
+ }
+@@ -4315,41 +4383,40 @@ static int yaffs_WriteCheckpointObjects(
+ yaffs_CheckpointObject cp;
+ int i;
+ int ok = 1;
+- struct list_head *lh;
++ struct ylist_head *lh;
+
+
+ /* Iterate through the objects in each hash entry,
+ * dumping them to the checkpointing stream.
+ */
+
+- for(i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++){
+- list_for_each(lh, &dev->objectBucket[i].list) {
++ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
++ ylist_for_each(lh, &dev->objectBucket[i].list) {
+ if (lh) {
+- obj = list_entry(lh, yaffs_Object, hashLink);
++ obj = ylist_entry(lh, yaffs_Object, hashLink);
+ if (!obj->deferedFree) {
+- yaffs_ObjectToCheckpointObject(&cp,obj);
++ yaffs_ObjectToCheckpointObject(&cp, obj);
+ cp.structType = sizeof(cp);
+
+- T(YAFFS_TRACE_CHECKPOINT,(
++ T(YAFFS_TRACE_CHECKPOINT, (
+ TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %x" TENDSTR),
+- cp.objectId,cp.parentId,cp.variantType,cp.chunkId,(unsigned) obj));
++ cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk, (unsigned) obj));
+
+- ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
++ ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+- if(ok && obj->variantType == YAFFS_OBJECT_TYPE_FILE){
++ if (ok && obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+ ok = yaffs_WriteCheckpointTnodes(obj);
+- }
+ }
+ }
+ }
+- }
++ }
+
+- /* Dump end of list */
+- memset(&cp,0xFF,sizeof(yaffs_CheckpointObject));
++ /* Dump end of list */
++ memset(&cp, 0xFF, sizeof(yaffs_CheckpointObject));
+ cp.structType = sizeof(cp);
+
+- if(ok)
+- ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
++ if (ok)
++ ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ return ok ? 1 : 0;
+ }
+@@ -4362,38 +4429,39 @@ static int yaffs_ReadCheckpointObjects(y
+ int done = 0;
+ yaffs_Object *hardList = NULL;
+
+- while(ok && !done) {
+- ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
+- if(cp.structType != sizeof(cp)) {
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("struct size %d instead of %d ok %d"TENDSTR),
+- cp.structType,sizeof(cp),ok));
++ while (ok && !done) {
++ ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (cp.structType != sizeof(cp)) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("struct size %d instead of %d ok %d"TENDSTR),
++ cp.structType, sizeof(cp), ok));
+ ok = 0;
+ }
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR),
+- cp.objectId,cp.parentId,cp.variantType,cp.chunkId));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR),
++ cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk));
+
+- if(ok && cp.objectId == ~0)
++ if (ok && cp.objectId == ~0)
+ done = 1;
+- else if(ok){
+- obj = yaffs_FindOrCreateObjectByNumber(dev,cp.objectId, cp.variantType);
+- if(obj) {
+- yaffs_CheckpointObjectToObject(obj,&cp);
+- if(obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
++ else if (ok) {
++ obj = yaffs_FindOrCreateObjectByNumber(dev, cp.objectId, cp.variantType);
++ if (obj) {
++ ok = yaffs_CheckpointObjectToObject(obj, &cp);
++ if (!ok)
++ break;
++ if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs_ReadCheckpointTnodes(obj);
+- } else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
++ } else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+ obj->hardLinks.next =
+- (struct list_head *)
+- hardList;
++ (struct ylist_head *) hardList;
+ hardList = obj;
+ }
+-
+- }
++ } else
++ ok = 0;
+ }
+ }
+
+- if(ok)
+- yaffs_HardlinkFixup(dev,hardList);
++ if (ok)
++ yaffs_HardlinkFixup(dev, hardList);
+
+ return ok ? 1 : 0;
+ }
+@@ -4403,11 +4471,11 @@ static int yaffs_WriteCheckpointSum(yaff
+ __u32 checkpointSum;
+ int ok;
+
+- yaffs_GetCheckpointSum(dev,&checkpointSum);
++ yaffs_GetCheckpointSum(dev, &checkpointSum);
+
+- ok = (yaffs_CheckpointWrite(dev,&checkpointSum,sizeof(checkpointSum)) == sizeof(checkpointSum));
++ ok = (yaffs_CheckpointWrite(dev, &checkpointSum, sizeof(checkpointSum)) == sizeof(checkpointSum));
+
+- if(!ok)
++ if (!ok)
+ return 0;
+
+ return 1;
+@@ -4419,14 +4487,14 @@ static int yaffs_ReadCheckpointSum(yaffs
+ __u32 checkpointSum1;
+ int ok;
+
+- yaffs_GetCheckpointSum(dev,&checkpointSum0);
++ yaffs_GetCheckpointSum(dev, &checkpointSum0);
+
+- ok = (yaffs_CheckpointRead(dev,&checkpointSum1,sizeof(checkpointSum1)) == sizeof(checkpointSum1));
++ ok = (yaffs_CheckpointRead(dev, &checkpointSum1, sizeof(checkpointSum1)) == sizeof(checkpointSum1));
+
+- if(!ok)
++ if (!ok)
+ return 0;
+
+- if(checkpointSum0 != checkpointSum1)
++ if (checkpointSum0 != checkpointSum1)
+ return 0;
+
+ return 1;
+@@ -4435,46 +4503,43 @@ static int yaffs_ReadCheckpointSum(yaffs
+
+ static int yaffs_WriteCheckpointData(yaffs_Device *dev)
+ {
+-
+ int ok = 1;
+
+- if(dev->skipCheckpointWrite || !dev->isYaffs2){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("skipping checkpoint write" TENDSTR)));
++ if (dev->skipCheckpointWrite || !dev->isYaffs2) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint write" TENDSTR)));
+ ok = 0;
+ }
+
+- if(ok)
+- ok = yaffs_CheckpointOpen(dev,1);
++ if (ok)
++ ok = yaffs_CheckpointOpen(dev, 1);
+
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint validity" TENDSTR)));
+- ok = yaffs_WriteCheckpointValidityMarker(dev,1);
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
++ ok = yaffs_WriteCheckpointValidityMarker(dev, 1);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint device" TENDSTR)));
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint device" TENDSTR)));
+ ok = yaffs_WriteCheckpointDevice(dev);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint objects" TENDSTR)));
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint objects" TENDSTR)));
+ ok = yaffs_WriteCheckpointObjects(dev);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint validity" TENDSTR)));
+- ok = yaffs_WriteCheckpointValidityMarker(dev,0);
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
++ ok = yaffs_WriteCheckpointValidityMarker(dev, 0);
+ }
+
+- if(ok){
++ if (ok)
+ ok = yaffs_WriteCheckpointSum(dev);
+- }
+-
+
+- if(!yaffs_CheckpointClose(dev))
+- ok = 0;
++ if (!yaffs_CheckpointClose(dev))
++ ok = 0;
+
+- if(ok)
+- dev->isCheckpointed = 1;
+- else
+- dev->isCheckpointed = 0;
++ if (ok)
++ dev->isCheckpointed = 1;
++ else
++ dev->isCheckpointed = 0;
+
+ return dev->isCheckpointed;
+ }
+@@ -4483,43 +4548,43 @@ static int yaffs_ReadCheckpointData(yaff
+ {
+ int ok = 1;
+
+- if(dev->skipCheckpointRead || !dev->isYaffs2){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("skipping checkpoint read" TENDSTR)));
++ if (dev->skipCheckpointRead || !dev->isYaffs2) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint read" TENDSTR)));
+ ok = 0;
+ }
+
+- if(ok)
+- ok = yaffs_CheckpointOpen(dev,0); /* open for read */
++ if (ok)
++ ok = yaffs_CheckpointOpen(dev, 0); /* open for read */
+
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint validity" TENDSTR)));
+- ok = yaffs_ReadCheckpointValidityMarker(dev,1);
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
++ ok = yaffs_ReadCheckpointValidityMarker(dev, 1);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint device" TENDSTR)));
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint device" TENDSTR)));
+ ok = yaffs_ReadCheckpointDevice(dev);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint objects" TENDSTR)));
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint objects" TENDSTR)));
+ ok = yaffs_ReadCheckpointObjects(dev);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint validity" TENDSTR)));
+- ok = yaffs_ReadCheckpointValidityMarker(dev,0);
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
++ ok = yaffs_ReadCheckpointValidityMarker(dev, 0);
+ }
+
+- if(ok){
++ if (ok) {
+ ok = yaffs_ReadCheckpointSum(dev);
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint checksum %d" TENDSTR),ok));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint checksum %d" TENDSTR), ok));
+ }
+
+- if(!yaffs_CheckpointClose(dev))
++ if (!yaffs_CheckpointClose(dev))
+ ok = 0;
+
+- if(ok)
+- dev->isCheckpointed = 1;
+- else
+- dev->isCheckpointed = 0;
++ if (ok)
++ dev->isCheckpointed = 1;
++ else
++ dev->isCheckpointed = 0;
+
+ return ok ? 1 : 0;
+
+@@ -4527,11 +4592,11 @@ static int yaffs_ReadCheckpointData(yaff
+
+ static void yaffs_InvalidateCheckpoint(yaffs_Device *dev)
+ {
+- if(dev->isCheckpointed ||
+- dev->blocksInCheckpoint > 0){
++ if (dev->isCheckpointed ||
++ dev->blocksInCheckpoint > 0) {
+ dev->isCheckpointed = 0;
+ yaffs_CheckpointInvalidateStream(dev);
+- if(dev->superBlock && dev->markSuperBlockDirty)
++ if (dev->superBlock && dev->markSuperBlockDirty)
+ dev->markSuperBlockDirty(dev->superBlock);
+ }
+ }
+@@ -4540,18 +4605,18 @@ static void yaffs_InvalidateCheckpoint(y
+ int yaffs_CheckpointSave(yaffs_Device *dev)
+ {
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("save entry: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("save entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
+
+ yaffs_VerifyObjects(dev);
+ yaffs_VerifyBlocks(dev);
+ yaffs_VerifyFreeChunks(dev);
+
+- if(!dev->isCheckpointed) {
++ if (!dev->isCheckpointed) {
+ yaffs_InvalidateCheckpoint(dev);
+ yaffs_WriteCheckpointData(dev);
+ }
+
+- T(YAFFS_TRACE_ALWAYS,(TSTR("save exit: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++ T(YAFFS_TRACE_ALWAYS, (TSTR("save exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
+
+ return dev->isCheckpointed;
+ }
+@@ -4559,17 +4624,17 @@ int yaffs_CheckpointSave(yaffs_Device *d
+ int yaffs_CheckpointRestore(yaffs_Device *dev)
+ {
+ int retval;
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("restore entry: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
+
+ retval = yaffs_ReadCheckpointData(dev);
+
+- if(dev->isCheckpointed){
++ if (dev->isCheckpointed) {
+ yaffs_VerifyObjects(dev);
+ yaffs_VerifyBlocks(dev);
+ yaffs_VerifyFreeChunks(dev);
+ }
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("restore exit: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
+
+ return retval;
+ }
+@@ -4584,12 +4649,12 @@ int yaffs_CheckpointRestore(yaffs_Device
+ * Curve-balls: the first chunk might also be the last chunk.
+ */
+
+-int yaffs_ReadDataFromFile(yaffs_Object * in, __u8 * buffer, loff_t offset,
+- int nBytes)
++int yaffs_ReadDataFromFile(yaffs_Object *in, __u8 *buffer, loff_t offset,
++ int nBytes)
+ {
+
+ int chunk;
+- int start;
++ __u32 start;
+ int nToCopy;
+ int n = nBytes;
+ int nDone = 0;
+@@ -4600,27 +4665,26 @@ int yaffs_ReadDataFromFile(yaffs_Object
+ dev = in->myDev;
+
+ while (n > 0) {
+- //chunk = offset / dev->nDataBytesPerChunk + 1;
+- //start = offset % dev->nDataBytesPerChunk;
+- yaffs_AddrToChunk(dev,offset,&chunk,&start);
++ /* chunk = offset / dev->nDataBytesPerChunk + 1; */
++ /* start = offset % dev->nDataBytesPerChunk; */
++ yaffs_AddrToChunk(dev, offset, &chunk, &start);
+ chunk++;
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+- if ((start + n) < dev->nDataBytesPerChunk) {
++ if ((start + n) < dev->nDataBytesPerChunk)
+ nToCopy = n;
+- } else {
++ else
+ nToCopy = dev->nDataBytesPerChunk - start;
+- }
+
+ cache = yaffs_FindChunkCache(in, chunk);
+
+ /* If the chunk is already in the cache or it is less than a whole chunk
+- * then use the cache (if there is caching)
++ * or we're using inband tags then use the cache (if there is caching)
+ * else bypass the cache.
+ */
+- if (cache || nToCopy != dev->nDataBytesPerChunk) {
++ if (cache || nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
+ if (dev->nShortOpCaches > 0) {
+
+ /* If we can't find the data in the cache, then load it up. */
+@@ -4641,14 +4705,9 @@ int yaffs_ReadDataFromFile(yaffs_Object
+
+ cache->locked = 1;
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
++
+ memcpy(buffer, &cache->data[start], nToCopy);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+-#endif
+ cache->locked = 0;
+ } else {
+ /* Read into the local buffer then copy..*/
+@@ -4657,41 +4716,19 @@ int yaffs_ReadDataFromFile(yaffs_Object
+ yaffs_GetTempBuffer(dev, __LINE__);
+ yaffs_ReadChunkDataFromObject(in, chunk,
+ localBuffer);
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
++
+ memcpy(buffer, &localBuffer[start], nToCopy);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+-#endif
++
+ yaffs_ReleaseTempBuffer(dev, localBuffer,
+ __LINE__);
+ }
+
+ } else {
+-#ifdef CONFIG_YAFFS_WINCE
+- __u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
+-
+- /* Under WinCE can't do direct transfer. Need to use a local buffer.
+- * This is because we otherwise screw up WinCE's memory mapper
+- */
+- yaffs_ReadChunkDataFromObject(in, chunk, localBuffer);
+-
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
+- memcpy(buffer, localBuffer, dev->nDataBytesPerChunk);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+- yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
+-#endif
+-
+-#else
+ /* A full chunk. Read directly into the supplied buffer. */
+ yaffs_ReadChunkDataFromObject(in, chunk, buffer);
+-#endif
++
+ }
+
+ n -= nToCopy;
+@@ -4704,28 +4741,37 @@ int yaffs_ReadDataFromFile(yaffs_Object
+ return nDone;
+ }
+
+-int yaffs_WriteDataToFile(yaffs_Object * in, const __u8 * buffer, loff_t offset,
+- int nBytes, int writeThrough)
++int yaffs_WriteDataToFile(yaffs_Object *in, const __u8 *buffer, loff_t offset,
++ int nBytes, int writeThrough)
+ {
+
+ int chunk;
+- int start;
++ __u32 start;
+ int nToCopy;
+ int n = nBytes;
+ int nDone = 0;
+ int nToWriteBack;
+ int startOfWrite = offset;
+ int chunkWritten = 0;
+- int nBytesRead;
++ __u32 nBytesRead;
++ __u32 chunkStart;
+
+ yaffs_Device *dev;
+
+ dev = in->myDev;
+
+ while (n > 0 && chunkWritten >= 0) {
+- //chunk = offset / dev->nDataBytesPerChunk + 1;
+- //start = offset % dev->nDataBytesPerChunk;
+- yaffs_AddrToChunk(dev,offset,&chunk,&start);
++ /* chunk = offset / dev->nDataBytesPerChunk + 1; */
++ /* start = offset % dev->nDataBytesPerChunk; */
++ yaffs_AddrToChunk(dev, offset, &chunk, &start);
++
++ if (chunk * dev->nDataBytesPerChunk + start != offset ||
++ start >= dev->nDataBytesPerChunk) {
++ T(YAFFS_TRACE_ERROR, (
++ TSTR("AddrToChunk of offset %d gives chunk %d start %d"
++ TENDSTR),
++ (int)offset, chunk, start));
++ }
+ chunk++;
+
+ /* OK now check for the curveball where the start and end are in
+@@ -4740,25 +4786,32 @@ int yaffs_WriteDataToFile(yaffs_Object *
+ * we need to write back as much as was there before.
+ */
+
+- nBytesRead =
+- in->variant.fileVariant.fileSize -
+- ((chunk - 1) * dev->nDataBytesPerChunk);
++ chunkStart = ((chunk - 1) * dev->nDataBytesPerChunk);
++
++ if (chunkStart > in->variant.fileVariant.fileSize)
++ nBytesRead = 0; /* Past end of file */
++ else
++ nBytesRead = in->variant.fileVariant.fileSize - chunkStart;
+
+- if (nBytesRead > dev->nDataBytesPerChunk) {
++ if (nBytesRead > dev->nDataBytesPerChunk)
+ nBytesRead = dev->nDataBytesPerChunk;
+- }
+
+ nToWriteBack =
+ (nBytesRead >
+ (start + n)) ? nBytesRead : (start + n);
+
++ if (nToWriteBack < 0 || nToWriteBack > dev->nDataBytesPerChunk)
++ YBUG();
++
+ } else {
+ nToCopy = dev->nDataBytesPerChunk - start;
+ nToWriteBack = dev->nDataBytesPerChunk;
+ }
+
+- if (nToCopy != dev->nDataBytesPerChunk) {
+- /* An incomplete start or end chunk (or maybe both start and end chunk) */
++ if (nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
++ /* An incomplete start or end chunk (or maybe both start and end chunk),
++ * or we're using inband tags, so we want to use the cache buffers.
++ */
+ if (dev->nShortOpCaches > 0) {
+ yaffs_ChunkCache *cache;
+ /* If we can't find the data in the cache, then load the cache */
+@@ -4775,10 +4828,9 @@ int yaffs_WriteDataToFile(yaffs_Object *
+ yaffs_ReadChunkDataFromObject(in, chunk,
+ cache->
+ data);
+- }
+- else if(cache &&
+- !cache->dirty &&
+- !yaffs_CheckSpaceForAllocation(in->myDev)){
++ } else if (cache &&
++ !cache->dirty &&
++ !yaffs_CheckSpaceForAllocation(in->myDev)) {
+ /* Drop the cache if it was a read cache item and
+ * no space check has been made for it.
+ */
+@@ -4788,16 +4840,12 @@ int yaffs_WriteDataToFile(yaffs_Object *
+ if (cache) {
+ yaffs_UseChunkCache(dev, cache, 1);
+ cache->locked = 1;
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
++
+
+ memcpy(&cache->data[start], buffer,
+ nToCopy);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+-#endif
++
+ cache->locked = 0;
+ cache->nBytes = nToWriteBack;
+
+@@ -4825,15 +4873,10 @@ int yaffs_WriteDataToFile(yaffs_Object *
+ yaffs_ReadChunkDataFromObject(in, chunk,
+ localBuffer);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
++
+
+ memcpy(&localBuffer[start], buffer, nToCopy);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+-#endif
+ chunkWritten =
+ yaffs_WriteChunkDataToObject(in, chunk,
+ localBuffer,
+@@ -4846,31 +4889,15 @@ int yaffs_WriteDataToFile(yaffs_Object *
+ }
+
+ } else {
+-
+-#ifdef CONFIG_YAFFS_WINCE
+- /* Under WinCE can't do direct transfer. Need to use a local buffer.
+- * This is because we otherwise screw up WinCE's memory mapper
+- */
+- __u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
+- memcpy(localBuffer, buffer, dev->nDataBytesPerChunk);
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+-#endif
+- chunkWritten =
+- yaffs_WriteChunkDataToObject(in, chunk, localBuffer,
+- dev->nDataBytesPerChunk,
+- 0);
+- yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
+-#else
+ /* A full chunk. Write directly from the supplied buffer. */
++
++
++
+ chunkWritten =
+ yaffs_WriteChunkDataToObject(in, chunk, buffer,
+ dev->nDataBytesPerChunk,
+ 0);
+-#endif
++
+ /* Since we've overwritten the cached data, we better invalidate it. */
+ yaffs_InvalidateChunkCache(in, chunk);
+ }
+@@ -4886,9 +4913,8 @@ int yaffs_WriteDataToFile(yaffs_Object *
+
+ /* Update file object */
+
+- if ((startOfWrite + nDone) > in->variant.fileVariant.fileSize) {
++ if ((startOfWrite + nDone) > in->variant.fileVariant.fileSize)
+ in->variant.fileVariant.fileSize = (startOfWrite + nDone);
+- }
+
+ in->dirty = 1;
+
+@@ -4898,7 +4924,7 @@ int yaffs_WriteDataToFile(yaffs_Object *
+
+ /* ---------------------- File resizing stuff ------------------ */
+
+-static void yaffs_PruneResizedChunks(yaffs_Object * in, int newSize)
++static void yaffs_PruneResizedChunks(yaffs_Object *in, int newSize)
+ {
+
+ yaffs_Device *dev = in->myDev;
+@@ -4939,11 +4965,11 @@ static void yaffs_PruneResizedChunks(yaf
+
+ }
+
+-int yaffs_ResizeFile(yaffs_Object * in, loff_t newSize)
++int yaffs_ResizeFile(yaffs_Object *in, loff_t newSize)
+ {
+
+ int oldFileSize = in->variant.fileVariant.fileSize;
+- int newSizeOfPartialChunk;
++ __u32 newSizeOfPartialChunk;
+ int newFullChunks;
+
+ yaffs_Device *dev = in->myDev;
+@@ -4955,13 +4981,11 @@ int yaffs_ResizeFile(yaffs_Object * in,
+
+ yaffs_CheckGarbageCollection(dev);
+
+- if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
+- return yaffs_GetFileSize(in);
+- }
++ if (in->variantType != YAFFS_OBJECT_TYPE_FILE)
++ return YAFFS_FAIL;
+
+- if (newSize == oldFileSize) {
+- return oldFileSize;
+- }
++ if (newSize == oldFileSize)
++ return YAFFS_OK;
+
+ if (newSize < oldFileSize) {
+
+@@ -4994,21 +5018,20 @@ int yaffs_ResizeFile(yaffs_Object * in,
+ }
+
+
+-
+ /* Write a new object header.
+ * show we've shrunk the file, if need be
+ * Do this only if the file is not in the deleted directories.
+ */
+- if (in->parent->objectId != YAFFS_OBJECTID_UNLINKED &&
+- in->parent->objectId != YAFFS_OBJECTID_DELETED) {
++ if (in->parent &&
++ in->parent->objectId != YAFFS_OBJECTID_UNLINKED &&
++ in->parent->objectId != YAFFS_OBJECTID_DELETED)
+ yaffs_UpdateObjectHeader(in, NULL, 0,
+ (newSize < oldFileSize) ? 1 : 0, 0);
+- }
+
+- return newSize;
++ return YAFFS_OK;
+ }
+
+-loff_t yaffs_GetFileSize(yaffs_Object * obj)
++loff_t yaffs_GetFileSize(yaffs_Object *obj)
+ {
+ obj = yaffs_GetEquivalentObject(obj);
+
+@@ -5024,7 +5047,7 @@ loff_t yaffs_GetFileSize(yaffs_Object *
+
+
+
+-int yaffs_FlushFile(yaffs_Object * in, int updateTime)
++int yaffs_FlushFile(yaffs_Object *in, int updateTime)
+ {
+ int retVal;
+ if (in->dirty) {
+@@ -5039,9 +5062,8 @@ int yaffs_FlushFile(yaffs_Object * in, i
+ #endif
+ }
+
+- retVal =
+- (yaffs_UpdateObjectHeader(in, NULL, 0, 0, 0) >=
+- 0) ? YAFFS_OK : YAFFS_FAIL;
++ retVal = (yaffs_UpdateObjectHeader(in, NULL, 0, 0, 0) >=
++ 0) ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ retVal = YAFFS_OK;
+ }
+@@ -5050,7 +5072,7 @@ int yaffs_FlushFile(yaffs_Object * in, i
+
+ }
+
+-static int yaffs_DoGenericObjectDeletion(yaffs_Object * in)
++static int yaffs_DoGenericObjectDeletion(yaffs_Object *in)
+ {
+
+ /* First off, invalidate the file's data in the cache, without flushing. */
+@@ -5058,13 +5080,13 @@ static int yaffs_DoGenericObjectDeletion
+
+ if (in->myDev->isYaffs2 && (in->parent != in->myDev->deletedDir)) {
+ /* Move to the unlinked directory so we have a record that it was deleted. */
+- yaffs_ChangeObjectName(in, in->myDev->deletedDir,"deleted", 0, 0);
++ yaffs_ChangeObjectName(in, in->myDev->deletedDir, _Y("deleted"), 0, 0);
+
+ }
+
+ yaffs_RemoveObjectFromDirectory(in);
+- yaffs_DeleteChunk(in->myDev, in->chunkId, 1, __LINE__);
+- in->chunkId = -1;
++ yaffs_DeleteChunk(in->myDev, in->hdrChunk, 1, __LINE__);
++ in->hdrChunk = 0;
+
+ yaffs_FreeObject(in);
+ return YAFFS_OK;
+@@ -5075,62 +5097,63 @@ static int yaffs_DoGenericObjectDeletion
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+-static int yaffs_UnlinkFile(yaffs_Object * in)
++static int yaffs_UnlinkFileIfNeeded(yaffs_Object *in)
+ {
+
+ int retVal;
+ int immediateDeletion = 0;
+
+- if (1) {
+ #ifdef __KERNEL__
+- if (!in->myInode) {
+- immediateDeletion = 1;
+-
+- }
++ if (!in->myInode)
++ immediateDeletion = 1;
+ #else
+- if (in->inUse <= 0) {
+- immediateDeletion = 1;
+-
+- }
++ if (in->inUse <= 0)
++ immediateDeletion = 1;
+ #endif
+- if (immediateDeletion) {
+- retVal =
+- yaffs_ChangeObjectName(in, in->myDev->deletedDir,
+- "deleted", 0, 0);
+- T(YAFFS_TRACE_TRACING,
+- (TSTR("yaffs: immediate deletion of file %d" TENDSTR),
+- in->objectId));
+- in->deleted = 1;
+- in->myDev->nDeletedFiles++;
+- if (0 && in->myDev->isYaffs2) {
+- yaffs_ResizeFile(in, 0);
+- }
+- yaffs_SoftDeleteFile(in);
+- } else {
+- retVal =
+- yaffs_ChangeObjectName(in, in->myDev->unlinkedDir,
+- "unlinked", 0, 0);
+- }
+
++ if (immediateDeletion) {
++ retVal =
++ yaffs_ChangeObjectName(in, in->myDev->deletedDir,
++ _Y("deleted"), 0, 0);
++ T(YAFFS_TRACE_TRACING,
++ (TSTR("yaffs: immediate deletion of file %d" TENDSTR),
++ in->objectId));
++ in->deleted = 1;
++ in->myDev->nDeletedFiles++;
++ if (1 || in->myDev->isYaffs2)
++ yaffs_ResizeFile(in, 0);
++ yaffs_SoftDeleteFile(in);
++ } else {
++ retVal =
++ yaffs_ChangeObjectName(in, in->myDev->unlinkedDir,
++ _Y("unlinked"), 0, 0);
+ }
++
++
+ return retVal;
+ }
+
+-int yaffs_DeleteFile(yaffs_Object * in)
++int yaffs_DeleteFile(yaffs_Object *in)
+ {
+ int retVal = YAFFS_OK;
++ int deleted = in->deleted;
++
++ yaffs_ResizeFile(in, 0);
+
+ if (in->nDataChunks > 0) {
+- /* Use soft deletion if there is data in the file */
+- if (!in->unlinked) {
+- retVal = yaffs_UnlinkFile(in);
+- }
++ /* Use soft deletion if there is data in the file.
++ * That won't be the case if it has been resized to zero.
++ */
++ if (!in->unlinked)
++ retVal = yaffs_UnlinkFileIfNeeded(in);
++
+ if (retVal == YAFFS_OK && in->unlinked && !in->deleted) {
+ in->deleted = 1;
++ deleted = 1;
+ in->myDev->nDeletedFiles++;
+ yaffs_SoftDeleteFile(in);
+ }
+- return in->deleted ? YAFFS_OK : YAFFS_FAIL;
++ return deleted ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ /* The file has no data chunks so we toss it immediately */
+ yaffs_FreeTnode(in->myDev, in->variant.fileVariant.top);
+@@ -5141,62 +5164,75 @@ int yaffs_DeleteFile(yaffs_Object * in)
+ }
+ }
+
+-static int yaffs_DeleteDirectory(yaffs_Object * in)
++static int yaffs_DeleteDirectory(yaffs_Object *in)
+ {
+ /* First check that the directory is empty. */
+- if (list_empty(&in->variant.directoryVariant.children)) {
++ if (ylist_empty(&in->variant.directoryVariant.children))
+ return yaffs_DoGenericObjectDeletion(in);
+- }
+
+ return YAFFS_FAIL;
+
+ }
+
+-static int yaffs_DeleteSymLink(yaffs_Object * in)
++static int yaffs_DeleteSymLink(yaffs_Object *in)
+ {
+ YFREE(in->variant.symLinkVariant.alias);
+
+ return yaffs_DoGenericObjectDeletion(in);
+ }
+
+-static int yaffs_DeleteHardLink(yaffs_Object * in)
++static int yaffs_DeleteHardLink(yaffs_Object *in)
+ {
+ /* remove this hardlink from the list assocaited with the equivalent
+ * object
+ */
+- list_del(&in->hardLinks);
++ ylist_del_init(&in->hardLinks);
+ return yaffs_DoGenericObjectDeletion(in);
+ }
+
+-static void yaffs_DestroyObject(yaffs_Object * obj)
++int yaffs_DeleteObject(yaffs_Object *obj)
+ {
++int retVal = -1;
+ switch (obj->variantType) {
+ case YAFFS_OBJECT_TYPE_FILE:
+- yaffs_DeleteFile(obj);
++ retVal = yaffs_DeleteFile(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+- yaffs_DeleteDirectory(obj);
++ return yaffs_DeleteDirectory(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+- yaffs_DeleteSymLink(obj);
++ retVal = yaffs_DeleteSymLink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+- yaffs_DeleteHardLink(obj);
++ retVal = yaffs_DeleteHardLink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+- yaffs_DoGenericObjectDeletion(obj);
++ retVal = yaffs_DoGenericObjectDeletion(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ retVal = 0;
+ break; /* should not happen. */
+ }
++
++ return retVal;
+ }
+
+-static int yaffs_UnlinkWorker(yaffs_Object * obj)
++static int yaffs_UnlinkWorker(yaffs_Object *obj)
+ {
+
++ int immediateDeletion = 0;
++
++#ifdef __KERNEL__
++ if (!obj->myInode)
++ immediateDeletion = 1;
++#else
++ if (obj->inUse <= 0)
++ immediateDeletion = 1;
++#endif
++
+ if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+ return yaffs_DeleteHardLink(obj);
+- } else if (!list_empty(&obj->hardLinks)) {
++ } else if (!ylist_empty(&obj->hardLinks)) {
+ /* Curve ball: We're unlinking an object that has a hardlink.
+ *
+ * This problem arises because we are not strictly following
+@@ -5215,24 +5251,24 @@ static int yaffs_UnlinkWorker(yaffs_Obje
+ int retVal;
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+- hl = list_entry(obj->hardLinks.next, yaffs_Object, hardLinks);
++ hl = ylist_entry(obj->hardLinks.next, yaffs_Object, hardLinks);
+
+- list_del_init(&hl->hardLinks);
+- list_del_init(&hl->siblings);
++ ylist_del_init(&hl->hardLinks);
++ ylist_del_init(&hl->siblings);
+
+ yaffs_GetObjectName(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+
+ retVal = yaffs_ChangeObjectName(obj, hl->parent, name, 0, 0);
+
+- if (retVal == YAFFS_OK) {
++ if (retVal == YAFFS_OK)
+ retVal = yaffs_DoGenericObjectDeletion(hl);
+- }
++
+ return retVal;
+
+- } else {
++ } else if (immediateDeletion) {
+ switch (obj->variantType) {
+ case YAFFS_OBJECT_TYPE_FILE:
+- return yaffs_UnlinkFile(obj);
++ return yaffs_DeleteFile(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ return yaffs_DeleteDirectory(obj);
+@@ -5248,21 +5284,22 @@ static int yaffs_UnlinkWorker(yaffs_Obje
+ default:
+ return YAFFS_FAIL;
+ }
+- }
++ } else
++ return yaffs_ChangeObjectName(obj, obj->myDev->unlinkedDir,
++ _Y("unlinked"), 0, 0);
+ }
+
+
+-static int yaffs_UnlinkObject( yaffs_Object *obj)
++static int yaffs_UnlinkObject(yaffs_Object *obj)
+ {
+
+- if (obj && obj->unlinkAllowed) {
++ if (obj && obj->unlinkAllowed)
+ return yaffs_UnlinkWorker(obj);
+- }
+
+ return YAFFS_FAIL;
+
+ }
+-int yaffs_Unlink(yaffs_Object * dir, const YCHAR * name)
++int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name)
+ {
+ yaffs_Object *obj;
+
+@@ -5272,8 +5309,8 @@ int yaffs_Unlink(yaffs_Object * dir, con
+
+ /*----------------------- Initialisation Scanning ---------------------- */
+
+-static void yaffs_HandleShadowedObject(yaffs_Device * dev, int objId,
+- int backwardScanning)
++static void yaffs_HandleShadowedObject(yaffs_Device *dev, int objId,
++ int backwardScanning)
+ {
+ yaffs_Object *obj;
+
+@@ -5286,9 +5323,8 @@ static void yaffs_HandleShadowedObject(y
+ /* Handle YAFFS2 case (backward scanning)
+ * If the shadowed object exists then ignore.
+ */
+- if (yaffs_FindObjectByNumber(dev, objId)) {
++ if (yaffs_FindObjectByNumber(dev, objId))
+ return;
+- }
+ }
+
+ /* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc.
+@@ -5297,6 +5333,8 @@ static void yaffs_HandleShadowedObject(y
+ obj =
+ yaffs_FindOrCreateObjectByNumber(dev, objId,
+ YAFFS_OBJECT_TYPE_FILE);
++ if (!obj)
++ return;
+ yaffs_AddObjectToDirectory(dev->unlinkedDir, obj);
+ obj->variant.fileVariant.shrinkSize = 0;
+ obj->valid = 1; /* So that we don't read any other info for this file */
+@@ -5325,44 +5363,77 @@ static void yaffs_HardlinkFixup(yaffs_De
+ if (in) {
+ /* Add the hardlink pointers */
+ hl->variant.hardLinkVariant.equivalentObject = in;
+- list_add(&hl->hardLinks, &in->hardLinks);
++ ylist_add(&hl->hardLinks, &in->hardLinks);
+ } else {
+ /* Todo Need to report/handle this better.
+ * Got a problem... hardlink to a non-existant object
+ */
+ hl->variant.hardLinkVariant.equivalentObject = NULL;
+- INIT_LIST_HEAD(&hl->hardLinks);
++ YINIT_LIST_HEAD(&hl->hardLinks);
+
+ }
+-
+ }
++}
++
++
+
++
++
++static int ybicmp(const void *a, const void *b)
++{
++ register int aseq = ((yaffs_BlockIndex *)a)->seq;
++ register int bseq = ((yaffs_BlockIndex *)b)->seq;
++ register int ablock = ((yaffs_BlockIndex *)a)->block;
++ register int bblock = ((yaffs_BlockIndex *)b)->block;
++ if (aseq == bseq)
++ return ablock - bblock;
++ else
++ return aseq - bseq;
+ }
+
+
++struct yaffs_ShadowFixerStruct {
++ int objectId;
++ int shadowedId;
++ struct yaffs_ShadowFixerStruct *next;
++};
++
+
++static void yaffs_StripDeletedObjects(yaffs_Device *dev)
++{
++ /*
++ * Sort out state of unlinked and deleted objects after scanning.
++ */
++ struct ylist_head *i;
++ struct ylist_head *n;
++ yaffs_Object *l;
+
++ /* Soft delete all the unlinked files */
++ ylist_for_each_safe(i, n,
++ &dev->unlinkedDir->variant.directoryVariant.children) {
++ if (i) {
++ l = ylist_entry(i, yaffs_Object, siblings);
++ yaffs_DeleteObject(l);
++ }
++ }
+
+-static int ybicmp(const void *a, const void *b){
+- register int aseq = ((yaffs_BlockIndex *)a)->seq;
+- register int bseq = ((yaffs_BlockIndex *)b)->seq;
+- register int ablock = ((yaffs_BlockIndex *)a)->block;
+- register int bblock = ((yaffs_BlockIndex *)b)->block;
+- if( aseq == bseq )
+- return ablock - bblock;
+- else
+- return aseq - bseq;
++ ylist_for_each_safe(i, n,
++ &dev->deletedDir->variant.directoryVariant.children) {
++ if (i) {
++ l = ylist_entry(i, yaffs_Object, siblings);
++ yaffs_DeleteObject(l);
++ }
++ }
+
+ }
+
+-static int yaffs_Scan(yaffs_Device * dev)
++static int yaffs_Scan(yaffs_Device *dev)
+ {
+ yaffs_ExtendedTags tags;
+ int blk;
+ int blockIterator;
+ int startIterator;
+ int endIterator;
+- int nBlocksToScan = 0;
+ int result;
+
+ int chunk;
+@@ -5371,26 +5442,19 @@ static int yaffs_Scan(yaffs_Device * dev
+ yaffs_BlockState state;
+ yaffs_Object *hardList = NULL;
+ yaffs_BlockInfo *bi;
+- int sequenceNumber;
++ __u32 sequenceNumber;
+ yaffs_ObjectHeader *oh;
+ yaffs_Object *in;
+ yaffs_Object *parent;
+- int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
+
+ int alloc_failed = 0;
+
++ struct yaffs_ShadowFixerStruct *shadowFixerList = NULL;
++
+
+ __u8 *chunkData;
+
+- yaffs_BlockIndex *blockIndex = NULL;
+
+- if (dev->isYaffs2) {
+- T(YAFFS_TRACE_SCAN,
+- (TSTR("yaffs_Scan is not for YAFFS2!" TENDSTR)));
+- return YAFFS_FAIL;
+- }
+-
+- //TODO Throw all the yaffs2 stuuf out of yaffs_Scan since it is only for yaffs1 format.
+
+ T(YAFFS_TRACE_SCAN,
+ (TSTR("yaffs_Scan starts intstartblk %d intendblk %d..." TENDSTR),
+@@ -5400,12 +5464,6 @@ static int yaffs_Scan(yaffs_Device * dev
+
+ dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+- if (dev->isYaffs2) {
+- blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
+- if(!blockIndex)
+- return YAFFS_FAIL;
+- }
+-
+ /* Scan all the blocks to determine their state */
+ for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
+ bi = yaffs_GetBlockInfo(dev, blk);
+@@ -5418,6 +5476,9 @@ static int yaffs_Scan(yaffs_Device * dev
+ bi->blockState = state;
+ bi->sequenceNumber = sequenceNumber;
+
++ if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->blockState = state = YAFFS_BLOCK_STATE_DEAD;
++
+ T(YAFFS_TRACE_SCAN_DEBUG,
+ (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
+ state, sequenceNumber));
+@@ -5430,70 +5491,21 @@ static int yaffs_Scan(yaffs_Device * dev
+ (TSTR("Block empty " TENDSTR)));
+ dev->nErasedBlocks++;
+ dev->nFreeChunks += dev->nChunksPerBlock;
+- } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+-
+- /* Determine the highest sequence number */
+- if (dev->isYaffs2 &&
+- sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+- sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+-
+- blockIndex[nBlocksToScan].seq = sequenceNumber;
+- blockIndex[nBlocksToScan].block = blk;
+-
+- nBlocksToScan++;
+-
+- if (sequenceNumber >= dev->sequenceNumber) {
+- dev->sequenceNumber = sequenceNumber;
+- }
+- } else if (dev->isYaffs2) {
+- /* TODO: Nasty sequence number! */
+- T(YAFFS_TRACE_SCAN,
+- (TSTR
+- ("Block scanning block %d has bad sequence number %d"
+- TENDSTR), blk, sequenceNumber));
+-
+- }
+ }
+ }
+
+- /* Sort the blocks
+- * Dungy old bubble sort for now...
+- */
+- if (dev->isYaffs2) {
+- yaffs_BlockIndex temp;
+- int i;
+- int j;
+-
+- for (i = 0; i < nBlocksToScan; i++)
+- for (j = i + 1; j < nBlocksToScan; j++)
+- if (blockIndex[i].seq > blockIndex[j].seq) {
+- temp = blockIndex[j];
+- blockIndex[j] = blockIndex[i];
+- blockIndex[i] = temp;
+- }
+- }
+-
+- /* Now scan the blocks looking at the data. */
+- if (dev->isYaffs2) {
+- startIterator = 0;
+- endIterator = nBlocksToScan - 1;
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan));
+- } else {
+- startIterator = dev->internalStartBlock;
+- endIterator = dev->internalEndBlock;
+- }
++ startIterator = dev->internalStartBlock;
++ endIterator = dev->internalEndBlock;
+
+ /* For each block.... */
+ for (blockIterator = startIterator; !alloc_failed && blockIterator <= endIterator;
+ blockIterator++) {
+
+- if (dev->isYaffs2) {
+- /* get the block to scan in the correct order */
+- blk = blockIndex[blockIterator].block;
+- } else {
+- blk = blockIterator;
+- }
++ YYIELD();
++
++ YYIELD();
++
++ blk = blockIterator;
+
+ bi = yaffs_GetBlockInfo(dev, blk);
+ state = bi->blockState;
+@@ -5511,7 +5523,7 @@ static int yaffs_Scan(yaffs_Device * dev
+
+ /* Let's have a good look at this chunk... */
+
+- if (!dev->isYaffs2 && tags.chunkDeleted) {
++ if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED || tags.chunkDeleted) {
+ /* YAFFS1 only...
+ * A deleted chunk
+ */
+@@ -5540,18 +5552,6 @@ static int yaffs_Scan(yaffs_Device * dev
+ dev->allocationBlockFinder = blk;
+ /* Set it to here to encourage the allocator to go forth from here. */
+
+- /* Yaffs2 sanity check:
+- * This should be the one with the highest sequence number
+- */
+- if (dev->isYaffs2
+- && (dev->sequenceNumber !=
+- bi->sequenceNumber)) {
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR
+- ("yaffs: Allocation block %d was not highest sequence id:"
+- " block seq = %d, dev seq = %d"
+- TENDSTR), blk,bi->sequenceNumber,dev->sequenceNumber));
+- }
+ }
+
+ dev->nFreeChunks += (dev->nChunksPerBlock - c);
+@@ -5570,11 +5570,11 @@ static int yaffs_Scan(yaffs_Device * dev
+ * the same chunkId).
+ */
+
+- if(!in)
++ if (!in)
+ alloc_failed = 1;
+
+- if(in){
+- if(!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk,1))
++ if (in) {
++ if (!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk, 1))
+ alloc_failed = 1;
+ }
+
+@@ -5617,7 +5617,7 @@ static int yaffs_Scan(yaffs_Device * dev
+ * deleted, and worse still it has changed type. Delete the old object.
+ */
+
+- yaffs_DestroyObject(in);
++ yaffs_DeleteObject(in);
+
+ in = 0;
+ }
+@@ -5627,14 +5627,20 @@ static int yaffs_Scan(yaffs_Device * dev
+ objectId,
+ oh->type);
+
+- if(!in)
++ if (!in)
+ alloc_failed = 1;
+
+ if (in && oh->shadowsObject > 0) {
+- yaffs_HandleShadowedObject(dev,
+- oh->
+- shadowsObject,
+- 0);
++
++ struct yaffs_ShadowFixerStruct *fixer;
++ fixer = YMALLOC(sizeof(struct yaffs_ShadowFixerStruct));
++ if (fixer) {
++ fixer->next = shadowFixerList;
++ shadowFixerList = fixer;
++ fixer->objectId = tags.objectId;
++ fixer->shadowedId = oh->shadowsObject;
++ }
++
+ }
+
+ if (in && in->valid) {
+@@ -5643,12 +5649,10 @@ static int yaffs_Scan(yaffs_Device * dev
+ unsigned existingSerial = in->serial;
+ unsigned newSerial = tags.serialNumber;
+
+- if (dev->isYaffs2 ||
+- ((existingSerial + 1) & 3) ==
+- newSerial) {
++ if (((existingSerial + 1) & 3) == newSerial) {
+ /* Use new one - destroy the exisiting one */
+ yaffs_DeleteChunk(dev,
+- in->chunkId,
++ in->hdrChunk,
+ 1, __LINE__);
+ in->valid = 0;
+ } else {
+@@ -5681,7 +5685,8 @@ static int yaffs_Scan(yaffs_Device * dev
+ in->yst_ctime = oh->yst_ctime;
+ in->yst_rdev = oh->yst_rdev;
+ #endif
+- in->chunkId = chunk;
++ in->hdrChunk = chunk;
++ in->serial = tags.serialNumber;
+
+ } else if (in && !in->valid) {
+ /* we need to load this info */
+@@ -5705,7 +5710,8 @@ static int yaffs_Scan(yaffs_Device * dev
+ in->yst_ctime = oh->yst_ctime;
+ in->yst_rdev = oh->yst_rdev;
+ #endif
+- in->chunkId = chunk;
++ in->hdrChunk = chunk;
++ in->serial = tags.serialNumber;
+
+ yaffs_SetObjectName(in, oh->name);
+ in->dirty = 0;
+@@ -5718,25 +5724,25 @@ static int yaffs_Scan(yaffs_Device * dev
+ yaffs_FindOrCreateObjectByNumber
+ (dev, oh->parentObjectId,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+- if (parent->variantType ==
++ if (!parent)
++ alloc_failed = 1;
++ if (parent && parent->variantType ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variantType =
+- YAFFS_OBJECT_TYPE_DIRECTORY;
+- INIT_LIST_HEAD(&parent->variant.
+- directoryVariant.
+- children);
+- } else if (parent->variantType !=
+- YAFFS_OBJECT_TYPE_DIRECTORY)
+- {
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ YINIT_LIST_HEAD(&parent->variant.
++ directoryVariant.
++ children);
++ } else if (!parent || parent->variantType !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * We're trying to use a non-directory as a directory
+ */
+
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+- ("yaffs tragedy: attempting to use non-directory as"
+- " a directory in scan. Put in lost+found."
++ ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ TENDSTR)));
+ parent = dev->lostNFoundDir;
+ }
+@@ -5760,15 +5766,6 @@ static int yaffs_Scan(yaffs_Device * dev
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+- if (dev->isYaffs2
+- && oh->isShrink) {
+- /* Prune back the shrunken chunks */
+- yaffs_PruneResizedChunks
+- (in, oh->fileSize);
+- /* Mark the block as having a shrinkHeader */
+- bi->hasShrinkHeader = 1;
+- }
+-
+ if (dev->useHeaderFileSize)
+
+ in->variant.fileVariant.
+@@ -5778,11 +5775,11 @@ static int yaffs_Scan(yaffs_Device * dev
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardLinkVariant.
+- equivalentObjectId =
+- oh->equivalentObjectId;
++ equivalentObjectId =
++ oh->equivalentObjectId;
+ in->hardLinks.next =
+- (struct list_head *)
+- hardList;
++ (struct ylist_head *)
++ hardList;
+ hardList = in;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+@@ -5794,15 +5791,17 @@ static int yaffs_Scan(yaffs_Device * dev
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symLinkVariant.alias =
+ yaffs_CloneString(oh->alias);
+- if(!in->variant.symLinkVariant.alias)
++ if (!in->variant.symLinkVariant.alias)
+ alloc_failed = 1;
+ break;
+ }
+
++/*
+ if (parent == dev->deletedDir) {
+ yaffs_DestroyObject(in);
+ bi->hasShrinkHeader = 1;
+ }
++*/
+ }
+ }
+ }
+@@ -5823,10 +5822,6 @@ static int yaffs_Scan(yaffs_Device * dev
+
+ }
+
+- if (blockIndex) {
+- YFREE(blockIndex);
+- }
+-
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+@@ -5834,32 +5829,36 @@ static int yaffs_Scan(yaffs_Device * dev
+ * hardlinks.
+ */
+
+- yaffs_HardlinkFixup(dev,hardList);
++ yaffs_HardlinkFixup(dev, hardList);
+
+- /* Handle the unlinked files. Since they were left in an unlinked state we should
+- * just delete them.
+- */
++ /* Fix up any shadowed objects */
+ {
+- struct list_head *i;
+- struct list_head *n;
++ struct yaffs_ShadowFixerStruct *fixer;
++ yaffs_Object *obj;
+
+- yaffs_Object *l;
+- /* Soft delete all the unlinked files */
+- list_for_each_safe(i, n,
+- &dev->unlinkedDir->variant.directoryVariant.
+- children) {
+- if (i) {
+- l = list_entry(i, yaffs_Object, siblings);
+- yaffs_DestroyObject(l);
+- }
++ while (shadowFixerList) {
++ fixer = shadowFixerList;
++ shadowFixerList = fixer->next;
++ /* Complete the rename transaction by deleting the shadowed object
++ * then setting the object header to unshadowed.
++ */
++ obj = yaffs_FindObjectByNumber(dev, fixer->shadowedId);
++ if (obj)
++ yaffs_DeleteObject(obj);
++
++ obj = yaffs_FindObjectByNumber(dev, fixer->objectId);
++
++ if (obj)
++ yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0);
++
++ YFREE(fixer);
+ }
+ }
+
+ yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
+
+- if(alloc_failed){
++ if (alloc_failed)
+ return YAFFS_FAIL;
+- }
+
+ T(YAFFS_TRACE_SCAN, (TSTR("yaffs_Scan ends" TENDSTR)));
+
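An aside for readers skimming the hunks above: instead of fixing shadowed objects the moment they are met, the reworked yaffs_Scan() records each one on the singly linked shadowFixerList and drains the list only after scanning has finished. The stand-alone sketch below shows just that push-then-drain pattern; struct shadow_fixer, push_fixer() and the printf are simplified stand-ins for yaffs_ShadowFixerStruct, the YMALLOC-based push and the yaffs_DeleteObject()/yaffs_UpdateObjectHeader() fixups in the patch.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for yaffs_ShadowFixerStruct: one record per shadowing
 * object header met during the scan, processed only after the scan ends. */
struct shadow_fixer {
	int objectId;
	int shadowedId;
	struct shadow_fixer *next;
};

/* Push a record on the head of the list, as the patched scan loop does. */
static struct shadow_fixer *push_fixer(struct shadow_fixer *list,
				       int objectId, int shadowedId)
{
	struct shadow_fixer *fixer = malloc(sizeof(*fixer));

	if (fixer) {
		fixer->objectId = objectId;
		fixer->shadowedId = shadowedId;
		fixer->next = list;
		list = fixer;
	}
	return list;
}

int main(void)
{
	struct shadow_fixer *list = NULL;

	/* During the scan: just remember the work. */
	list = push_fixer(list, 101, 42);
	list = push_fixer(list, 205, 77);

	/* After the scan: drain the list and do the deferred fixups. */
	while (list) {
		struct shadow_fixer *fixer = list;

		list = fixer->next;
		printf("delete shadowed object %d, refresh header of object %d\n",
		       fixer->shadowedId, fixer->objectId);
		free(fixer);
	}
	return 0;
}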
+@@ -5871,25 +5870,27 @@ static void yaffs_CheckObjectDetailsLoad
+ {
+ __u8 *chunkData;
+ yaffs_ObjectHeader *oh;
+- yaffs_Device *dev = in->myDev;
++ yaffs_Device *dev;
+ yaffs_ExtendedTags tags;
+ int result;
+ int alloc_failed = 0;
+
+- if(!in)
++ if (!in)
+ return;
+
++ dev = in->myDev;
++
+ #if 0
+- T(YAFFS_TRACE_SCAN,(TSTR("details for object %d %s loaded" TENDSTR),
++ T(YAFFS_TRACE_SCAN, (TSTR("details for object %d %s loaded" TENDSTR),
+ in->objectId,
+ in->lazyLoaded ? "not yet" : "already"));
+ #endif
+
+- if(in->lazyLoaded){
++ if (in->lazyLoaded && in->hdrChunk > 0) {
+ in->lazyLoaded = 0;
+ chunkData = yaffs_GetTempBuffer(dev, __LINE__);
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev,in->chunkId,chunkData,&tags);
++ result = yaffs_ReadChunkWithTagsFromNAND(dev, in->hdrChunk, chunkData, &tags);
+ oh = (yaffs_ObjectHeader *) chunkData;
+
+ in->yst_mode = oh->yst_mode;
+@@ -5911,18 +5912,18 @@ static void yaffs_CheckObjectDetailsLoad
+ #endif
+ yaffs_SetObjectName(in, oh->name);
+
+- if(in->variantType == YAFFS_OBJECT_TYPE_SYMLINK){
+- in->variant.symLinkVariant.alias =
++ if (in->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
++ in->variant.symLinkVariant.alias =
+ yaffs_CloneString(oh->alias);
+- if(!in->variant.symLinkVariant.alias)
++ if (!in->variant.symLinkVariant.alias)
+ alloc_failed = 1; /* Not returned to caller */
+ }
+
+- yaffs_ReleaseTempBuffer(dev,chunkData, __LINE__);
++ yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
+ }
+ }
+
+-static int yaffs_ScanBackwards(yaffs_Device * dev)
++static int yaffs_ScanBackwards(yaffs_Device *dev)
+ {
+ yaffs_ExtendedTags tags;
+ int blk;
+@@ -5938,7 +5939,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ yaffs_BlockState state;
+ yaffs_Object *hardList = NULL;
+ yaffs_BlockInfo *bi;
+- int sequenceNumber;
++ __u32 sequenceNumber;
+ yaffs_ObjectHeader *oh;
+ yaffs_Object *in;
+ yaffs_Object *parent;
+@@ -5972,12 +5973,12 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
+
+- if(!blockIndex) {
++ if (!blockIndex) {
+ blockIndex = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockIndex));
+ altBlockIndex = 1;
+ }
+
+- if(!blockIndex) {
++ if (!blockIndex) {
+ T(YAFFS_TRACE_SCAN,
+ (TSTR("yaffs_Scan() could not allocate block index!" TENDSTR)));
+ return YAFFS_FAIL;
+@@ -5999,15 +6000,17 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ bi->blockState = state;
+ bi->sequenceNumber = sequenceNumber;
+
+- if(bi->sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA)
++ if (bi->sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ bi->blockState = state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->blockState = state = YAFFS_BLOCK_STATE_DEAD;
+
+ T(YAFFS_TRACE_SCAN_DEBUG,
+ (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
+ state, sequenceNumber));
+
+
+- if(state == YAFFS_BLOCK_STATE_CHECKPOINT){
++ if (state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ dev->blocksInCheckpoint++;
+
+ } else if (state == YAFFS_BLOCK_STATE_DEAD) {
+@@ -6021,8 +6024,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+
+ /* Determine the highest sequence number */
+- if (dev->isYaffs2 &&
+- sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
++ if (sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+ sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+
+ blockIndex[nBlocksToScan].seq = sequenceNumber;
+@@ -6030,10 +6032,9 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ nBlocksToScan++;
+
+- if (sequenceNumber >= dev->sequenceNumber) {
++ if (sequenceNumber >= dev->sequenceNumber)
+ dev->sequenceNumber = sequenceNumber;
+- }
+- } else if (dev->isYaffs2) {
++ } else {
+ /* TODO: Nasty sequence number! */
+ T(YAFFS_TRACE_SCAN,
+ (TSTR
+@@ -6053,11 +6054,13 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ /* Sort the blocks */
+ #ifndef CONFIG_YAFFS_USE_OWN_SORT
+- yaffs_qsort(blockIndex, nBlocksToScan,
+- sizeof(yaffs_BlockIndex), ybicmp);
++ {
++ /* Use qsort now. */
++ yaffs_qsort(blockIndex, nBlocksToScan, sizeof(yaffs_BlockIndex), ybicmp);
++ }
+ #else
+ {
+- /* Dungy old bubble sort... */
++ /* Dungy old bubble sort... */
+
+ yaffs_BlockIndex temp;
+ int i;
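A side note on the sort shown above: ybicmp follows the usual qsort comparator contract (negative, zero, positive), keying primarily on the block sequence number and secondarily on the block number, which is what lets the backwards scan walk blocks in age order. Below is a minimal stand-alone demonstration with the C library qsort; block_index and bicmp are simplified stand-ins for yaffs_BlockIndex and ybicmp.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for yaffs_BlockIndex. */
typedef struct {
	int seq;	/* block sequence number */
	int block;	/* block number */
} block_index;

/* Same ordering rule as ybicmp: primary key seq, secondary key block.
 * (Subtraction mirrors the patch; a three-way comparison would be the
 * overflow-safe choice for arbitrary values.) */
static int bicmp(const void *a, const void *b)
{
	const block_index *x = a;
	const block_index *y = b;

	if (x->seq == y->seq)
		return x->block - y->block;
	return x->seq - y->seq;
}

int main(void)
{
	block_index idx[] = { {7, 30}, {3, 12}, {7, 10}, {3, 4} };
	size_t n = sizeof(idx) / sizeof(idx[0]);
	size_t i;

	qsort(idx, n, sizeof(idx[0]), bicmp);

	for (i = 0; i < n; i++)
		printf("seq %d block %d\n", idx[i].seq, idx[i].block);
	/* Output order: (3,4) (3,12) (7,10) (7,30). */
	return 0;
}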
+@@ -6075,7 +6078,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ YYIELD();
+
+- T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR)));
++ T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR)));
+
+ /* Now scan the blocks looking at the data. */
+ startIterator = 0;
+@@ -6085,10 +6088,10 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ /* For each block.... backwards */
+ for (blockIterator = endIterator; !alloc_failed && blockIterator >= startIterator;
+- blockIterator--) {
+- /* Cooperative multitasking! This loop can run for so
++ blockIterator--) {
++ /* Cooperative multitasking! This loop can run for so
+ long that watchdog timers expire. */
+- YYIELD();
++ YYIELD();
+
+ /* get the block to scan in the correct order */
+ blk = blockIndex[blockIterator].block;
+@@ -6127,10 +6130,8 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ * this is the one being allocated from
+ */
+
+- if(foundChunksInBlock)
+- {
++ if (foundChunksInBlock) {
+ /* This is a chunk that was skipped due to failing the erased check */
+-
+ } else if (c == 0) {
+ /* We're looking at the first chunk in the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+@@ -6138,7 +6139,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ } else {
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ state == YAFFS_BLOCK_STATE_ALLOCATING) {
+- if(dev->sequenceNumber == bi->sequenceNumber) {
++ if (dev->sequenceNumber == bi->sequenceNumber) {
+ /* this is the block being allocated from */
+
+ T(YAFFS_TRACE_SCAN,
+@@ -6150,27 +6151,31 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ dev->allocationBlock = blk;
+ dev->allocationPage = c;
+ dev->allocationBlockFinder = blk;
+- }
+- else {
++ } else {
+ /* This is a partially written block that is not
+ * the current allocation block. This block must have
+ * had a write failure, so set up for retirement.
+ */
+
+- bi->needsRetiring = 1;
++ /* bi->needsRetiring = 1; ??? TODO */
+ bi->gcPrioritise = 1;
+
+ T(YAFFS_TRACE_ALWAYS,
+- (TSTR("Partially written block %d being set for retirement" TENDSTR),
++ (TSTR("Partially written block %d detected" TENDSTR),
+ blk));
+ }
+-
+ }
+-
+ }
+
+ dev->nFreeChunks++;
+
++ } else if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED) {
++ T(YAFFS_TRACE_SCAN,
++ (TSTR(" Unfixed ECC in chunk(%d:%d), chunk ignored"TENDSTR),
++ blk, c));
++
++ dev->nFreeChunks++;
++
+ } else if (tags.chunkId > 0) {
+ /* chunkId > 0 so it is a data chunk... */
+ unsigned int endpos;
+@@ -6187,7 +6192,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ tags.
+ objectId,
+ YAFFS_OBJECT_TYPE_FILE);
+- if(!in){
++ if (!in) {
+ /* Out of memory */
+ alloc_failed = 1;
+ }
+@@ -6197,8 +6202,8 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ && chunkBase <
+ in->variant.fileVariant.shrinkSize) {
+ /* This has not been invalidated by a resize */
+- if(!yaffs_PutChunkIntoFile(in, tags.chunkId,
+- chunk, -1)){
++ if (!yaffs_PutChunkIntoFile(in, tags.chunkId,
++ chunk, -1)) {
+ alloc_failed = 1;
+ }
+
+@@ -6221,7 +6226,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ scannedFileSize;
+ }
+
+- } else if(in) {
++ } else if (in) {
+ /* This chunk has been invalidated by a resize, so delete */
+ yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
+
+@@ -6242,6 +6247,8 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ in = yaffs_FindOrCreateObjectByNumber
+ (dev, tags.objectId,
+ tags.extraObjectType);
++ if (!in)
++ alloc_failed = 1;
+ }
+
+ if (!in ||
+@@ -6251,8 +6258,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ tags.extraShadows ||
+ (!in->valid &&
+ (tags.objectId == YAFFS_OBJECTID_ROOT ||
+- tags.objectId == YAFFS_OBJECTID_LOSTNFOUND))
+- ) {
++ tags.objectId == YAFFS_OBJECTID_LOSTNFOUND))) {
+
+ /* If we don't have valid info then we need to read the chunk
+ * TODO In future we can probably defer reading the chunk and
+@@ -6266,8 +6272,17 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ oh = (yaffs_ObjectHeader *) chunkData;
+
+- if (!in)
++ if (dev->inbandTags) {
++ /* Fix up the header if they got corrupted by inband tags */
++ oh->shadowsObject = oh->inbandShadowsObject;
++ oh->isShrink = oh->inbandIsShrink;
++ }
++
++ if (!in) {
+ in = yaffs_FindOrCreateObjectByNumber(dev, tags.objectId, oh->type);
++ if (!in)
++ alloc_failed = 1;
++ }
+
+ }
+
+@@ -6275,10 +6290,9 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ /* TODO Hoosterman we have a problem! */
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+- ("yaffs tragedy: Could not make object for object %d "
+- "at chunk %d during scan"
++ ("yaffs tragedy: Could not make object for object %d at chunk %d during scan"
+ TENDSTR), tags.objectId, chunk));
+-
++ continue;
+ }
+
+ if (in->valid) {
+@@ -6289,10 +6303,9 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ if ((in->variantType == YAFFS_OBJECT_TYPE_FILE) &&
+ ((oh &&
+- oh-> type == YAFFS_OBJECT_TYPE_FILE)||
++ oh->type == YAFFS_OBJECT_TYPE_FILE) ||
+ (tags.extraHeaderInfoAvailable &&
+- tags.extraObjectType == YAFFS_OBJECT_TYPE_FILE))
+- ) {
++ tags.extraObjectType == YAFFS_OBJECT_TYPE_FILE))) {
+ __u32 thisSize =
+ (oh) ? oh->fileSize : tags.
+ extraFileLength;
+@@ -6300,7 +6313,9 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ (oh) ? oh->
+ parentObjectId : tags.
+ extraParentObjectId;
+- unsigned isShrink =
++
++
++ isShrink =
+ (oh) ? oh->isShrink : tags.
+ extraIsShrinkHeader;
+
+@@ -6323,9 +6338,8 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ thisSize;
+ }
+
+- if (isShrink) {
++ if (isShrink)
+ bi->hasShrinkHeader = 1;
+- }
+
+ }
+ /* Use existing - destroy this one. */
+@@ -6333,6 +6347,17 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ }
+
++ if (!in->valid && in->variantType !=
++ (oh ? oh->type : tags.extraObjectType))
++ T(YAFFS_TRACE_ERROR, (
++ TSTR("yaffs tragedy: Bad object type, "
++ TCONT("%d != %d, for object %d at chunk ")
++ TCONT("%d during scan")
++ TENDSTR), oh ?
++ oh->type : tags.extraObjectType,
++ in->variantType, tags.objectId,
++ chunk));
++
+ if (!in->valid &&
+ (tags.objectId == YAFFS_OBJECTID_ROOT ||
+ tags.objectId ==
+@@ -6340,7 +6365,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ /* We only load some info, don't fiddle with directory structure */
+ in->valid = 1;
+
+- if(oh) {
++ if (oh) {
+ in->variantType = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+@@ -6365,15 +6390,15 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ in->lazyLoaded = 1;
+ }
+
+- in->chunkId = chunk;
++ in->hdrChunk = chunk;
+
+ } else if (!in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+- in->chunkId = chunk;
++ in->hdrChunk = chunk;
+
+- if(oh) {
++ if (oh) {
+ in->variantType = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+@@ -6403,20 +6428,19 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ yaffs_SetObjectName(in, oh->name);
+ parent =
+ yaffs_FindOrCreateObjectByNumber
+- (dev, oh->parentObjectId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
++ (dev, oh->parentObjectId,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
+
+ fileSize = oh->fileSize;
+- isShrink = oh->isShrink;
++ isShrink = oh->isShrink;
+ equivalentObjectId = oh->equivalentObjectId;
+
+- }
+- else {
++ } else {
+ in->variantType = tags.extraObjectType;
+ parent =
+ yaffs_FindOrCreateObjectByNumber
+- (dev, tags.extraParentObjectId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
++ (dev, tags.extraParentObjectId,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
+ fileSize = tags.extraFileLength;
+ isShrink = tags.extraIsShrinkHeader;
+ equivalentObjectId = tags.extraEquivalentObjectId;
+@@ -6425,29 +6449,30 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ }
+ in->dirty = 0;
+
++ if (!parent)
++ alloc_failed = 1;
++
+ /* directory stuff...
+ * hook up to parent
+ */
+
+- if (parent->variantType ==
++ if (parent && parent->variantType ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variantType =
+- YAFFS_OBJECT_TYPE_DIRECTORY;
+- INIT_LIST_HEAD(&parent->variant.
+- directoryVariant.
+- children);
+- } else if (parent->variantType !=
+- YAFFS_OBJECT_TYPE_DIRECTORY)
+- {
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ YINIT_LIST_HEAD(&parent->variant.
++ directoryVariant.
++ children);
++ } else if (!parent || parent->variantType !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * We're trying to use a non-directory as a directory
+ */
+
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+- ("yaffs tragedy: attempting to use non-directory as"
+- " a directory in scan. Put in lost+found."
++ ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ TENDSTR)));
+ parent = dev->lostNFoundDir;
+ }
+@@ -6494,12 +6519,12 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+- if(!itsUnlinked) {
+- in->variant.hardLinkVariant.equivalentObjectId =
+- equivalentObjectId;
+- in->hardLinks.next =
+- (struct list_head *) hardList;
+- hardList = in;
++ if (!itsUnlinked) {
++ in->variant.hardLinkVariant.equivalentObjectId =
++ equivalentObjectId;
++ in->hardLinks.next =
++ (struct ylist_head *) hardList;
++ hardList = in;
+ }
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+@@ -6509,12 +6534,11 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+- if(oh){
+- in->variant.symLinkVariant.alias =
+- yaffs_CloneString(oh->
+- alias);
+- if(!in->variant.symLinkVariant.alias)
+- alloc_failed = 1;
++ if (oh) {
++ in->variant.symLinkVariant.alias =
++ yaffs_CloneString(oh->alias);
++ if (!in->variant.symLinkVariant.alias)
++ alloc_failed = 1;
+ }
+ break;
+ }
+@@ -6551,75 +6575,129 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ * We should now have scanned all the objects, now it's time to add these
+ * hardlinks.
+ */
+- yaffs_HardlinkFixup(dev,hardList);
++ yaffs_HardlinkFixup(dev, hardList);
+
+
+- /*
+- * Sort out state of unlinked and deleted objects.
+- */
+- {
+- struct list_head *i;
+- struct list_head *n;
++ yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
+
+- yaffs_Object *l;
++ if (alloc_failed)
++ return YAFFS_FAIL;
+
+- /* Soft delete all the unlinked files */
+- list_for_each_safe(i, n,
+- &dev->unlinkedDir->variant.directoryVariant.
+- children) {
+- if (i) {
+- l = list_entry(i, yaffs_Object, siblings);
+- yaffs_DestroyObject(l);
+- }
+- }
++ T(YAFFS_TRACE_SCAN, (TSTR("yaffs_ScanBackwards ends" TENDSTR)));
+
+- /* Soft delete all the deletedDir files */
+- list_for_each_safe(i, n,
+- &dev->deletedDir->variant.directoryVariant.
+- children) {
+- if (i) {
+- l = list_entry(i, yaffs_Object, siblings);
+- yaffs_DestroyObject(l);
++ return YAFFS_OK;
++}
+
+- }
++/*------------------------------ Directory Functions ----------------------------- */
++
++static void yaffs_VerifyObjectInDirectory(yaffs_Object *obj)
++{
++ struct ylist_head *lh;
++ yaffs_Object *listObj;
++
++ int count = 0;
++
++ if (!obj) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("No object to verify" TENDSTR)));
++ YBUG();
++ return;
++ }
++
++ if (yaffs_SkipVerification(obj->myDev))
++ return;
++
++ if (!obj->parent) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object does not have parent" TENDSTR)));
++ YBUG();
++ return;
++ }
++
++ if (obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Parent is not directory" TENDSTR)));
++ YBUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ ylist_for_each(lh, &obj->parent->variant.directoryVariant.children) {
++ if (lh) {
++ listObj = ylist_entry(lh, yaffs_Object, siblings);
++ yaffs_VerifyObject(listObj);
++ if (obj == listObj)
++ count++;
+ }
++ }
++
++ if (count != 1) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory %d times" TENDSTR), count));
++ YBUG();
+ }
++}
+
+- yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
++static void yaffs_VerifyDirectory(yaffs_Object *directory)
++{
++ struct ylist_head *lh;
++ yaffs_Object *listObj;
+
+- if(alloc_failed){
+- return YAFFS_FAIL;
++ if (!directory) {
++ YBUG();
++ return;
+ }
+
+- T(YAFFS_TRACE_SCAN, (TSTR("yaffs_ScanBackwards ends" TENDSTR)));
++ if (yaffs_SkipFullVerification(directory->myDev))
++ return;
+
+- return YAFFS_OK;
++ if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Directory has wrong type: %d" TENDSTR), directory->variantType));
++ YBUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ ylist_for_each(lh, &directory->variant.directoryVariant.children) {
++ if (lh) {
++ listObj = ylist_entry(lh, yaffs_Object, siblings);
++ if (listObj->parent != directory) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory list has wrong parent %p" TENDSTR), listObj->parent));
++ YBUG();
++ }
++ yaffs_VerifyObjectInDirectory(listObj);
++ }
++ }
+ }
+
+-/*------------------------------ Directory Functions ----------------------------- */
+
+-static void yaffs_RemoveObjectFromDirectory(yaffs_Object * obj)
++static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj)
+ {
+ yaffs_Device *dev = obj->myDev;
++ yaffs_Object *parent;
++
++ yaffs_VerifyObjectInDirectory(obj);
++ parent = obj->parent;
++
++ yaffs_VerifyDirectory(parent);
+
+- if(dev && dev->removeObjectCallback)
++ if (dev && dev->removeObjectCallback)
+ dev->removeObjectCallback(obj);
+
+- list_del_init(&obj->siblings);
++
++ ylist_del_init(&obj->siblings);
+ obj->parent = NULL;
++
++ yaffs_VerifyDirectory(parent);
+ }
+
+
+-static void yaffs_AddObjectToDirectory(yaffs_Object * directory,
+- yaffs_Object * obj)
++static void yaffs_AddObjectToDirectory(yaffs_Object *directory,
++ yaffs_Object *obj)
+ {
+-
+ if (!directory) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: Trying to add an object to a null pointer directory"
+ TENDSTR)));
+ YBUG();
++ return;
+ }
+ if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+@@ -6631,37 +6709,42 @@ static void yaffs_AddObjectToDirectory(y
+
+ if (obj->siblings.prev == NULL) {
+ /* Not initialised */
+- INIT_LIST_HEAD(&obj->siblings);
+-
+- } else if (!list_empty(&obj->siblings)) {
+- /* If it is holed up somewhere else, un hook it */
+- yaffs_RemoveObjectFromDirectory(obj);
++ YBUG();
+ }
++
++
++ yaffs_VerifyDirectory(directory);
++
++ yaffs_RemoveObjectFromDirectory(obj);
++
++
+ /* Now add it */
+- list_add(&obj->siblings, &directory->variant.directoryVariant.children);
++ ylist_add(&obj->siblings, &directory->variant.directoryVariant.children);
+ obj->parent = directory;
+
+ if (directory == obj->myDev->unlinkedDir
+- || directory == obj->myDev->deletedDir) {
++ || directory == obj->myDev->deletedDir) {
+ obj->unlinked = 1;
+ obj->myDev->nUnlinkedFiles++;
+ obj->renameAllowed = 0;
+ }
++
++ yaffs_VerifyDirectory(directory);
++ yaffs_VerifyObjectInDirectory(obj);
+ }
+
+-yaffs_Object *yaffs_FindObjectByName(yaffs_Object * directory,
+- const YCHAR * name)
++yaffs_Object *yaffs_FindObjectByName(yaffs_Object *directory,
++ const YCHAR *name)
+ {
+ int sum;
+
+- struct list_head *i;
++ struct ylist_head *i;
+ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+
+ yaffs_Object *l;
+
+- if (!name) {
++ if (!name)
+ return NULL;
+- }
+
+ if (!directory) {
+ T(YAFFS_TRACE_ALWAYS,
+@@ -6669,6 +6752,7 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+ ("tragedy: yaffs_FindObjectByName: null pointer directory"
+ TENDSTR)));
+ YBUG();
++ return NULL;
+ }
+ if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+@@ -6679,28 +6763,27 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+
+ sum = yaffs_CalcNameSum(name);
+
+- list_for_each(i, &directory->variant.directoryVariant.children) {
++ ylist_for_each(i, &directory->variant.directoryVariant.children) {
+ if (i) {
+- l = list_entry(i, yaffs_Object, siblings);
++ l = ylist_entry(i, yaffs_Object, siblings);
++
++ if (l->parent != directory)
++ YBUG();
+
+ yaffs_CheckObjectDetailsLoaded(l);
+
+ /* Special case for lost-n-found */
+ if (l->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
+- if (yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME) == 0) {
++ if (yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME) == 0)
+ return l;
+- }
+- } else if (yaffs_SumCompare(l->sum, sum) || l->chunkId <= 0)
+- {
+- /* LostnFound cunk called Objxxx
++ } else if (yaffs_SumCompare(l->sum, sum) || l->hdrChunk <= 0) {
++ /* LostnFound chunk called Objxxx
+ * Do a real check
+ */
+ yaffs_GetObjectName(l, buffer,
+ YAFFS_MAX_NAME_LENGTH);
+- if (yaffs_strncmp(name, buffer,YAFFS_MAX_NAME_LENGTH) == 0) {
++ if (yaffs_strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH) == 0)
+ return l;
+- }
+-
+ }
+ }
+ }
+@@ -6710,10 +6793,10 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+
+
+ #if 0
+-int yaffs_ApplyToDirectoryChildren(yaffs_Object * theDir,
+- int (*fn) (yaffs_Object *))
++int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir,
++ int (*fn) (yaffs_Object *))
+ {
+- struct list_head *i;
++ struct ylist_head *i;
+ yaffs_Object *l;
+
+ if (!theDir) {
+@@ -6722,20 +6805,21 @@ int yaffs_ApplyToDirectoryChildren(yaffs
+ ("tragedy: yaffs_FindObjectByName: null pointer directory"
+ TENDSTR)));
+ YBUG();
++ return YAFFS_FAIL;
+ }
+ if (theDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
+ YBUG();
++ return YAFFS_FAIL;
+ }
+
+- list_for_each(i, &theDir->variant.directoryVariant.children) {
++ ylist_for_each(i, &theDir->variant.directoryVariant.children) {
+ if (i) {
+- l = list_entry(i, yaffs_Object, siblings);
+- if (l && !fn(l)) {
++ l = ylist_entry(i, yaffs_Object, siblings);
++ if (l && !fn(l))
+ return YAFFS_FAIL;
+- }
+ }
+ }
+
+@@ -6748,7 +6832,7 @@ int yaffs_ApplyToDirectoryChildren(yaffs
+ * actual object.
+ */
+
+-yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object * obj)
++yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj)
+ {
+ if (obj && obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+ /* We want the object id of the equivalent object, not this one */
+@@ -6756,10 +6840,9 @@ yaffs_Object *yaffs_GetEquivalentObject(
+ yaffs_CheckObjectDetailsLoaded(obj);
+ }
+ return obj;
+-
+ }
+
+-int yaffs_GetObjectName(yaffs_Object * obj, YCHAR * name, int buffSize)
++int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize)
+ {
+ memset(name, 0, buffSize * sizeof(YCHAR));
+
+@@ -6767,18 +6850,26 @@ int yaffs_GetObjectName(yaffs_Object * o
+
+ if (obj->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
+ yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffSize - 1);
+- } else if (obj->chunkId <= 0) {
++ } else if (obj->hdrChunk <= 0) {
+ YCHAR locName[20];
++ YCHAR numString[20];
++ YCHAR *x = &numString[19];
++ unsigned v = obj->objectId;
++ numString[19] = 0;
++ while (v > 0) {
++ x--;
++ *x = '0' + (v % 10);
++ v /= 10;
++ }
+ /* make up a name */
+- yaffs_sprintf(locName, _Y("%s%d"), YAFFS_LOSTNFOUND_PREFIX,
+- obj->objectId);
++ yaffs_strcpy(locName, YAFFS_LOSTNFOUND_PREFIX);
++ yaffs_strcat(locName, x);
+ yaffs_strncpy(name, locName, buffSize - 1);
+
+ }
+ #ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+- else if (obj->shortName[0]) {
++ else if (obj->shortName[0])
+ yaffs_strcpy(name, obj->shortName);
+- }
+ #endif
+ else {
+ int result;
+@@ -6788,9 +6879,9 @@ int yaffs_GetObjectName(yaffs_Object * o
+
+ memset(buffer, 0, obj->myDev->nDataBytesPerChunk);
+
+- if (obj->chunkId >= 0) {
++ if (obj->hdrChunk > 0) {
+ result = yaffs_ReadChunkWithTagsFromNAND(obj->myDev,
+- obj->chunkId, buffer,
++ obj->hdrChunk, buffer,
+ NULL);
+ }
+ yaffs_strncpy(name, oh->name, buffSize - 1);
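A note on the name construction in the hunk above: the patch drops yaffs_sprintf in favour of building the decimal objectId by hand, writing digits from the end of a small buffer. The sketch below is the same loop in plain C; u32_to_decimal and the "obj." prefix are illustrative stand-ins, not names from the patch (the real prefix is YAFFS_LOSTNFOUND_PREFIX, and YCHAR may be wider than char).

#include <stdio.h>
#include <string.h>

/* Build the decimal digits of v from the end of a 20-byte buffer, the way
 * the patched yaffs_GetObjectName() does, and return a pointer to the
 * first digit.  As in the patch, v == 0 yields an empty digit string. */
static char *u32_to_decimal(unsigned v, char buf[20])
{
	char *x = &buf[19];

	buf[19] = '\0';
	while (v > 0) {
		x--;
		*x = '0' + (v % 10);
		v /= 10;
	}
	return x;
}

int main(void)
{
	char num[20];
	char name[64];

	strcpy(name, "obj.");		/* stand-in for YAFFS_LOSTNFOUND_PREFIX */
	strcat(name, u32_to_decimal(4711, num));
	printf("%s\n", name);		/* prints "obj.4711" */
	return 0;
}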
+@@ -6801,46 +6892,43 @@ int yaffs_GetObjectName(yaffs_Object * o
+ return yaffs_strlen(name);
+ }
+
+-int yaffs_GetObjectFileLength(yaffs_Object * obj)
++int yaffs_GetObjectFileLength(yaffs_Object *obj)
+ {
+-
+ /* Dereference any hard linking */
+ obj = yaffs_GetEquivalentObject(obj);
+
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
++ if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+ return obj->variant.fileVariant.fileSize;
+- }
+- if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
++ if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK)
+ return yaffs_strlen(obj->variant.symLinkVariant.alias);
+- } else {
++ else {
+ /* Only a directory should drop through to here */
+ return obj->myDev->nDataBytesPerChunk;
+ }
+ }
+
+-int yaffs_GetObjectLinkCount(yaffs_Object * obj)
++int yaffs_GetObjectLinkCount(yaffs_Object *obj)
+ {
+ int count = 0;
+- struct list_head *i;
++ struct ylist_head *i;
+
+- if (!obj->unlinked) {
+- count++; /* the object itself */
+- }
+- list_for_each(i, &obj->hardLinks) {
+- count++; /* add the hard links; */
+- }
+- return count;
++ if (!obj->unlinked)
++ count++; /* the object itself */
++
++ ylist_for_each(i, &obj->hardLinks)
++ count++; /* add the hard links; */
+
++ return count;
+ }
+
+-int yaffs_GetObjectInode(yaffs_Object * obj)
++int yaffs_GetObjectInode(yaffs_Object *obj)
+ {
+ obj = yaffs_GetEquivalentObject(obj);
+
+ return obj->objectId;
+ }
+
+-unsigned yaffs_GetObjectType(yaffs_Object * obj)
++unsigned yaffs_GetObjectType(yaffs_Object *obj)
+ {
+ obj = yaffs_GetEquivalentObject(obj);
+
+@@ -6872,19 +6960,18 @@ unsigned yaffs_GetObjectType(yaffs_Objec
+ }
+ }
+
+-YCHAR *yaffs_GetSymlinkAlias(yaffs_Object * obj)
++YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj)
+ {
+ obj = yaffs_GetEquivalentObject(obj);
+- if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
++ if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK)
+ return yaffs_CloneString(obj->variant.symLinkVariant.alias);
+- } else {
++ else
+ return yaffs_CloneString(_Y(""));
+- }
+ }
+
+ #ifndef CONFIG_YAFFS_WINCE
+
+-int yaffs_SetAttributes(yaffs_Object * obj, struct iattr *attr)
++int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr)
+ {
+ unsigned int valid = attr->ia_valid;
+
+@@ -6910,7 +6997,7 @@ int yaffs_SetAttributes(yaffs_Object * o
+ return YAFFS_OK;
+
+ }
+-int yaffs_GetAttributes(yaffs_Object * obj, struct iattr *attr)
++int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr)
+ {
+ unsigned int valid = 0;
+
+@@ -6934,13 +7021,12 @@ int yaffs_GetAttributes(yaffs_Object * o
+ attr->ia_valid = valid;
+
+ return YAFFS_OK;
+-
+ }
+
+ #endif
+
+ #if 0
+-int yaffs_DumpObject(yaffs_Object * obj)
++int yaffs_DumpObject(yaffs_Object *obj)
+ {
+ YCHAR name[257];
+
+@@ -6951,7 +7037,7 @@ int yaffs_DumpObject(yaffs_Object * obj)
+ ("Object %d, inode %d \"%s\"\n dirty %d valid %d serial %d sum %d"
+ " chunk %d type %d size %d\n"
+ TENDSTR), obj->objectId, yaffs_GetObjectInode(obj), name,
+- obj->dirty, obj->valid, obj->serial, obj->sum, obj->chunkId,
++ obj->dirty, obj->valid, obj->serial, obj->sum, obj->hdrChunk,
+ yaffs_GetObjectType(obj), yaffs_GetObjectFileLength(obj)));
+
+ return YAFFS_OK;
+@@ -6960,7 +7046,7 @@ int yaffs_DumpObject(yaffs_Object * obj)
+
+ /*---------------------------- Initialisation code -------------------------------------- */
+
+-static int yaffs_CheckDevFunctions(const yaffs_Device * dev)
++static int yaffs_CheckDevFunctions(const yaffs_Device *dev)
+ {
+
+ /* Common functions, gotta have */
+@@ -7011,7 +7097,7 @@ static int yaffs_CreateInitialDirectorie
+ yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_LOSTNFOUND,
+ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+- if(dev->lostNFoundDir && dev->rootDir && dev->unlinkedDir && dev->deletedDir){
++ if (dev->lostNFoundDir && dev->rootDir && dev->unlinkedDir && dev->deletedDir) {
+ yaffs_AddObjectToDirectory(dev->rootDir, dev->lostNFoundDir);
+ return YAFFS_OK;
+ }
+@@ -7019,7 +7105,7 @@ static int yaffs_CreateInitialDirectorie
+ return YAFFS_FAIL;
+ }
+
+-int yaffs_GutsInitialise(yaffs_Device * dev)
++int yaffs_GutsInitialise(yaffs_Device *dev)
+ {
+ int init_failed = 0;
+ unsigned x;
+@@ -7040,6 +7126,8 @@ int yaffs_GutsInitialise(yaffs_Device *
+ dev->chunkOffset = 0;
+ dev->nFreeChunks = 0;
+
++ dev->gcBlock = -1;
++
+ if (dev->startBlock == 0) {
+ dev->internalStartBlock = dev->startBlock + 1;
+ dev->internalEndBlock = dev->endBlock + 1;
+@@ -7049,18 +7137,18 @@ int yaffs_GutsInitialise(yaffs_Device *
+
+ /* Check geometry parameters. */
+
+- if ((dev->isYaffs2 && dev->nDataBytesPerChunk < 1024) ||
+- (!dev->isYaffs2 && dev->nDataBytesPerChunk != 512) ||
++ if ((!dev->inbandTags && dev->isYaffs2 && dev->totalBytesPerChunk < 1024) ||
++ (!dev->isYaffs2 && dev->totalBytesPerChunk < 512) ||
++ (dev->inbandTags && !dev->isYaffs2) ||
+ dev->nChunksPerBlock < 2 ||
+ dev->nReservedBlocks < 2 ||
+ dev->internalStartBlock <= 0 ||
+ dev->internalEndBlock <= 0 ||
+- dev->internalEndBlock <= (dev->internalStartBlock + dev->nReservedBlocks + 2) // otherwise it is too small
+- ) {
++ dev->internalEndBlock <= (dev->internalStartBlock + dev->nReservedBlocks + 2)) { /* otherwise it is too small */
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s "
+- TENDSTR), dev->nDataBytesPerChunk, dev->isYaffs2 ? "2" : ""));
++ ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s, inbandTags %d "
++ TENDSTR), dev->totalBytesPerChunk, dev->isYaffs2 ? "2" : "", dev->inbandTags));
+ return YAFFS_FAIL;
+ }
+
+@@ -7070,6 +7158,12 @@ int yaffs_GutsInitialise(yaffs_Device *
+ return YAFFS_FAIL;
+ }
+
++ /* Sort out space for inband tags, if required */
++ if (dev->inbandTags)
++ dev->nDataBytesPerChunk = dev->totalBytesPerChunk - sizeof(yaffs_PackedTags2TagsPart);
++ else
++ dev->nDataBytesPerChunk = dev->totalBytesPerChunk;
++
+ /* Got the right mix of functions? */
+ if (!yaffs_CheckDevFunctions(dev)) {
+ /* Function missing */
+@@ -7097,31 +7191,18 @@ int yaffs_GutsInitialise(yaffs_Device *
+
+ dev->isMounted = 1;
+
+-
+-
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+- /* Start off assuming it is a power of 2 */
+- dev->chunkShift = ShiftDiv(dev->nDataBytesPerChunk);
+- dev->chunkMask = (1<<dev->chunkShift) - 1;
+-
+- if(dev->nDataBytesPerChunk == (dev->chunkMask + 1)){
+- /* Yes it is a power of 2, disable crumbs */
+- dev->crumbMask = 0;
+- dev->crumbShift = 0;
+- dev->crumbsPerChunk = 0;
+- } else {
+- /* Not a power of 2, use crumbs instead */
+- dev->crumbShift = ShiftDiv(sizeof(yaffs_PackedTags2TagsPart));
+- dev->crumbMask = (1<<dev->crumbShift)-1;
+- dev->crumbsPerChunk = dev->nDataBytesPerChunk/(1 << dev->crumbShift);
+- dev->chunkShift = 0;
+- dev->chunkMask = 0;
+- }
+-
++ x = dev->nDataBytesPerChunk;
++ /* We always use dev->chunkShift and dev->chunkDiv */
++ dev->chunkShift = Shifts(x);
++ x >>= dev->chunkShift;
++ dev->chunkDiv = x;
++ /* We only use chunk mask if chunkDiv is 1 */
++ dev->chunkMask = (1<<dev->chunkShift) - 1;
+
+ /*
+ * Calculate chunkGroupBits.
+@@ -7133,16 +7214,15 @@ int yaffs_GutsInitialise(yaffs_Device *
+ bits = ShiftsGE(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+- if(!dev->wideTnodesDisabled){
++ if (!dev->wideTnodesDisabled) {
+ /* bits must be even so that we end up with 32-bit words */
+- if(bits & 1)
++ if (bits & 1)
+ bits++;
+- if(bits < 16)
++ if (bits < 16)
+ dev->tnodeWidth = 16;
+ else
+ dev->tnodeWidth = bits;
+- }
+- else
++ } else
+ dev->tnodeWidth = 16;
+
+ dev->tnodeMask = (1<<dev->tnodeWidth)-1;
+@@ -7193,7 +7273,7 @@ int yaffs_GutsInitialise(yaffs_Device *
+ dev->hasPendingPrioritisedGCs = 1; /* Assume the worst for now, will get fixed on first GC */
+
+ /* Initialise temporary buffers and caches. */
+- if(!yaffs_InitialiseTempBuffers(dev))
++ if (!yaffs_InitialiseTempBuffers(dev))
+ init_failed = 1;
+
+ dev->srCache = NULL;
+@@ -7203,25 +7283,26 @@ int yaffs_GutsInitialise(yaffs_Device *
+ if (!init_failed &&
+ dev->nShortOpCaches > 0) {
+ int i;
+- __u8 *buf;
++ void *buf;
+ int srCacheBytes = dev->nShortOpCaches * sizeof(yaffs_ChunkCache);
+
+- if (dev->nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES) {
++ if (dev->nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES)
+ dev->nShortOpCaches = YAFFS_MAX_SHORT_OP_CACHES;
+- }
+
+- buf = dev->srCache = YMALLOC(srCacheBytes);
++ dev->srCache = YMALLOC(srCacheBytes);
+
+- if(dev->srCache)
+- memset(dev->srCache,0,srCacheBytes);
++ buf = (__u8 *) dev->srCache;
++
++ if (dev->srCache)
++ memset(dev->srCache, 0, srCacheBytes);
+
+ for (i = 0; i < dev->nShortOpCaches && buf; i++) {
+ dev->srCache[i].object = NULL;
+ dev->srCache[i].lastUse = 0;
+ dev->srCache[i].dirty = 0;
+- dev->srCache[i].data = buf = YMALLOC_DMA(dev->nDataBytesPerChunk);
++ dev->srCache[i].data = buf = YMALLOC_DMA(dev->totalBytesPerChunk);
+ }
+- if(!buf)
++ if (!buf)
+ init_failed = 1;
+
+ dev->srLastUse = 0;
+@@ -7229,29 +7310,30 @@ int yaffs_GutsInitialise(yaffs_Device *
+
+ dev->cacheHits = 0;
+
+- if(!init_failed){
++ if (!init_failed) {
+ dev->gcCleanupList = YMALLOC(dev->nChunksPerBlock * sizeof(__u32));
+- if(!dev->gcCleanupList)
++ if (!dev->gcCleanupList)
+ init_failed = 1;
+ }
+
+- if (dev->isYaffs2) {
++ if (dev->isYaffs2)
+ dev->useHeaderFileSize = 1;
+- }
+- if(!init_failed && !yaffs_InitialiseBlocks(dev))
++
++ if (!init_failed && !yaffs_InitialiseBlocks(dev))
+ init_failed = 1;
+
+ yaffs_InitialiseTnodes(dev);
+ yaffs_InitialiseObjects(dev);
+
+- if(!init_failed && !yaffs_CreateInitialDirectories(dev))
++ if (!init_failed && !yaffs_CreateInitialDirectories(dev))
+ init_failed = 1;
+
+
+- if(!init_failed){
++ if (!init_failed) {
+ /* Now scan the flash. */
+ if (dev->isYaffs2) {
+- if(yaffs_CheckpointRestore(dev)) {
++ if (yaffs_CheckpointRestore(dev)) {
++ yaffs_CheckObjectDetailsLoaded(dev->rootDir);
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: restored from checkpoint" TENDSTR)));
+ } else {
+@@ -7273,24 +7355,25 @@ int yaffs_GutsInitialise(yaffs_Device *
+ dev->nBackgroundDeletions = 0;
+ dev->oldestDirtySequence = 0;
+
+- if(!init_failed && !yaffs_InitialiseBlocks(dev))
++ if (!init_failed && !yaffs_InitialiseBlocks(dev))
+ init_failed = 1;
+
+ yaffs_InitialiseTnodes(dev);
+ yaffs_InitialiseObjects(dev);
+
+- if(!init_failed && !yaffs_CreateInitialDirectories(dev))
++ if (!init_failed && !yaffs_CreateInitialDirectories(dev))
+ init_failed = 1;
+
+- if(!init_failed && !yaffs_ScanBackwards(dev))
++ if (!init_failed && !yaffs_ScanBackwards(dev))
+ init_failed = 1;
+ }
+- }else
+- if(!yaffs_Scan(dev))
++ } else if (!yaffs_Scan(dev))
+ init_failed = 1;
++
++ yaffs_StripDeletedObjects(dev);
+ }
+
+- if(init_failed){
++ if (init_failed) {
+ /* Clean up the mess */
+ T(YAFFS_TRACE_TRACING,
+ (TSTR("yaffs: yaffs_GutsInitialise() aborted.\n" TENDSTR)));
+@@ -7318,7 +7401,7 @@ int yaffs_GutsInitialise(yaffs_Device *
+
+ }
+
+-void yaffs_Deinitialise(yaffs_Device * dev)
++void yaffs_Deinitialise(yaffs_Device *dev)
+ {
+ if (dev->isMounted) {
+ int i;
+@@ -7330,7 +7413,7 @@ void yaffs_Deinitialise(yaffs_Device * d
+ dev->srCache) {
+
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+- if(dev->srCache[i].data)
++ if (dev->srCache[i].data)
+ YFREE(dev->srCache[i].data);
+ dev->srCache[i].data = NULL;
+ }
+@@ -7341,16 +7424,17 @@ void yaffs_Deinitialise(yaffs_Device * d
+
+ YFREE(dev->gcCleanupList);
+
+- for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ YFREE(dev->tempBuffer[i].buffer);
+- }
+
+ dev->isMounted = 0;
+- }
+
++ if (dev->deinitialiseNAND)
++ dev->deinitialiseNAND(dev);
++ }
+ }
+
+-static int yaffs_CountFreeChunks(yaffs_Device * dev)
++static int yaffs_CountFreeChunks(yaffs_Device *dev)
+ {
+ int nFree;
+ int b;
+@@ -7358,7 +7442,7 @@ static int yaffs_CountFreeChunks(yaffs_D
+ yaffs_BlockInfo *blk;
+
+ for (nFree = 0, b = dev->internalStartBlock; b <= dev->internalEndBlock;
+- b++) {
++ b++) {
+ blk = yaffs_GetBlockInfo(dev, b);
+
+ switch (blk->blockState) {
+@@ -7373,19 +7457,19 @@ static int yaffs_CountFreeChunks(yaffs_D
+ default:
+ break;
+ }
+-
+ }
+
+ return nFree;
+ }
+
+-int yaffs_GetNumberOfFreeChunks(yaffs_Device * dev)
++int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev)
+ {
+ /* This is what we report to the outside world */
+
+ int nFree;
+ int nDirtyCacheChunks;
+ int blocksForCheckpoint;
++ int i;
+
+ #if 1
+ nFree = dev->nFreeChunks;
+@@ -7397,12 +7481,9 @@ int yaffs_GetNumberOfFreeChunks(yaffs_De
+
+ /* Now count the number of dirty chunks in the cache and subtract those */
+
+- {
+- int i;
+- for (nDirtyCacheChunks = 0, i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].dirty)
+- nDirtyCacheChunks++;
+- }
++ for (nDirtyCacheChunks = 0, i = 0; i < dev->nShortOpCaches; i++) {
++ if (dev->srCache[i].dirty)
++ nDirtyCacheChunks++;
+ }
+
+ nFree -= nDirtyCacheChunks;
+@@ -7410,8 +7491,8 @@ int yaffs_GetNumberOfFreeChunks(yaffs_De
+ nFree -= ((dev->nReservedBlocks + 1) * dev->nChunksPerBlock);
+
+ /* Now we figure out how much to reserve for the checkpoint and report that... */
+- blocksForCheckpoint = dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint;
+- if(blocksForCheckpoint < 0)
++ blocksForCheckpoint = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint;
++ if (blocksForCheckpoint < 0)
+ blocksForCheckpoint = 0;
+
+ nFree -= (blocksForCheckpoint * dev->nChunksPerBlock);
+@@ -7425,12 +7506,12 @@ int yaffs_GetNumberOfFreeChunks(yaffs_De
+
+ static int yaffs_freeVerificationFailures;
+
+-static void yaffs_VerifyFreeChunks(yaffs_Device * dev)
++static void yaffs_VerifyFreeChunks(yaffs_Device *dev)
+ {
+ int counted;
+ int difference;
+
+- if(yaffs_SkipVerification(dev))
++ if (yaffs_SkipVerification(dev))
+ return;
+
+ counted = yaffs_CountFreeChunks(dev);
+@@ -7447,23 +7528,25 @@ static void yaffs_VerifyFreeChunks(yaffs
+
+ /*---------------------------------------- YAFFS test code ----------------------*/
+
+-#define yaffs_CheckStruct(structure,syze, name) \
+- if(sizeof(structure) != syze) \
+- { \
+- T(YAFFS_TRACE_ALWAYS,(TSTR("%s should be %d but is %d\n" TENDSTR),\
+- name,syze,sizeof(structure))); \
+- return YAFFS_FAIL; \
+- }
++#define yaffs_CheckStruct(structure, syze, name) \
++ do { \
++ if (sizeof(structure) != syze) { \
++ T(YAFFS_TRACE_ALWAYS, (TSTR("%s should be %d but is %d\n" TENDSTR),\
++ name, syze, sizeof(structure))); \
++ return YAFFS_FAIL; \
++ } \
++ } while (0)
+
+ static int yaffs_CheckStructures(void)
+ {
+-/* yaffs_CheckStruct(yaffs_Tags,8,"yaffs_Tags") */
+-/* yaffs_CheckStruct(yaffs_TagsUnion,8,"yaffs_TagsUnion") */
+-/* yaffs_CheckStruct(yaffs_Spare,16,"yaffs_Spare") */
++/* yaffs_CheckStruct(yaffs_Tags,8,"yaffs_Tags"); */
++/* yaffs_CheckStruct(yaffs_TagsUnion,8,"yaffs_TagsUnion"); */
++/* yaffs_CheckStruct(yaffs_Spare,16,"yaffs_Spare"); */
+ #ifndef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode")
++ yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode");
+ #endif
+- yaffs_CheckStruct(yaffs_ObjectHeader, 512, "yaffs_ObjectHeader")
+-
+- return YAFFS_OK;
++#ifndef CONFIG_YAFFS_WINCE
++ yaffs_CheckStruct(yaffs_ObjectHeader, 512, "yaffs_ObjectHeader");
++#endif
++ return YAFFS_OK;
+ }
+--- a/fs/yaffs2/yaffs_guts.h
++++ b/fs/yaffs2/yaffs_guts.h
+@@ -90,7 +90,7 @@
+
+ #define YAFFS_MAX_SHORT_OP_CACHES 20
+
+-#define YAFFS_N_TEMP_BUFFERS 4
++#define YAFFS_N_TEMP_BUFFERS 6
+
+ /* We limit the number attempts at sucessfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+@@ -108,6 +108,9 @@
+ #define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
+ #define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xEFFFFF00
+
++/* Special sequence number for bad block that failed to be marked bad */
++#define YAFFS_SEQUENCE_BAD_BLOCK 0xFFFF0000
++
+ /* ChunkCache is used for short read/write operations.*/
+ typedef struct {
+ struct yaffs_ObjectStruct *object;
+@@ -134,11 +137,10 @@ typedef struct {
+ typedef struct {
+ unsigned chunkId:20;
+ unsigned serialNumber:2;
+- unsigned byteCount:10;
++ unsigned byteCountLSB:10;
+ unsigned objectId:18;
+ unsigned ecc:12;
+- unsigned unusedStuff:2;
+-
++ unsigned byteCountMSB:2;
+ } yaffs_Tags;
+
+ typedef union {
+@@ -277,13 +279,13 @@ typedef struct {
+
+ int softDeletions:10; /* number of soft deleted pages */
+ int pagesInUse:10; /* number of pages in use */
+- yaffs_BlockState blockState:4; /* One of the above block states */
++ unsigned blockState:4; /* One of the above block states. NB use unsigned because enum is sometimes an int */
+ __u32 needsRetiring:1; /* Data has failed on this block, need to get valid data off */
+- /* and retire the block. */
+- __u32 skipErasedCheck: 1; /* If this is set we can skip the erased check on this block */
+- __u32 gcPrioritise: 1; /* An ECC check or blank check has failed on this block.
++ /* and retire the block. */
++ __u32 skipErasedCheck:1; /* If this is set we can skip the erased check on this block */
++ __u32 gcPrioritise:1; /* An ECC check or blank check has failed on this block.
+ It should be prioritised for GC */
+- __u32 chunkErrorStrikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
++ __u32 chunkErrorStrikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+ __u32 hasShrinkHeader:1; /* This block has at least one shrink object header */
+@@ -300,11 +302,11 @@ typedef struct {
+
+ /* Apply to everything */
+ int parentObjectId;
+- __u16 sum__NoLongerUsed; /* checksum of name. No longer used */
++ __u16 sum__NoLongerUsed; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+- /* Thes following apply to directories, files, symlinks - not hard links */
+- __u32 yst_mode; /* protection */
++ /* The following apply to directories, files, symlinks - not hard links */
++ __u32 yst_mode; /* protection */
+
+ #ifdef CONFIG_YAFFS_WINCE
+ __u32 notForWinCE[5];
+@@ -331,11 +333,14 @@ typedef struct {
+ __u32 win_ctime[2];
+ __u32 win_atime[2];
+ __u32 win_mtime[2];
+- __u32 roomToGrow[4];
+ #else
+- __u32 roomToGrow[10];
++ __u32 roomToGrow[6];
++
+ #endif
++ __u32 inbandShadowsObject;
++ __u32 inbandIsShrink;
+
++ __u32 reservedSpace[2];
+ int shadowsObject; /* This object header shadows the specified object if > 0 */
+
+ /* isShrink applies to object headers written when we shrink the file (ie resize) */
+@@ -381,7 +386,7 @@ typedef struct {
+ } yaffs_FileStructure;
+
+ typedef struct {
+- struct list_head children; /* list of child links */
++ struct ylist_head children; /* list of child links */
+ } yaffs_DirectoryStructure;
+
+ typedef struct {
+@@ -418,23 +423,24 @@ struct yaffs_ObjectStruct {
+ * still in the inode cache. Free of object is defered.
+ * until the inode is released.
+ */
++ __u8 beingCreated:1; /* This object is still being created so skip some checks. */
+
+ __u8 serial; /* serial number of chunk in NAND. Cached here */
+ __u16 sum; /* sum of the name to speed searching */
+
+- struct yaffs_DeviceStruct *myDev; /* The device I'm on */
++ struct yaffs_DeviceStruct *myDev; /* The device I'm on */
+
+- struct list_head hashLink; /* list of objects in this hash bucket */
++ struct ylist_head hashLink; /* list of objects in this hash bucket */
+
+- struct list_head hardLinks; /* all the equivalent hard linked objects */
++ struct ylist_head hardLinks; /* all the equivalent hard linked objects */
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+ struct yaffs_ObjectStruct *parent;
+- struct list_head siblings;
++ struct ylist_head siblings;
+
+ /* Where's my object header in NAND? */
+- int chunkId;
++ int hdrChunk;
+
+ int nDataChunks; /* Number of data chunks attached to the file. */
+
+@@ -485,7 +491,7 @@ struct yaffs_ObjectList_struct {
+ typedef struct yaffs_ObjectList_struct yaffs_ObjectList;
+
+ typedef struct {
+- struct list_head list;
++ struct ylist_head list;
+ int count;
+ } yaffs_ObjectBucket;
+
+@@ -495,11 +501,10 @@ typedef struct {
+ */
+
+ typedef struct {
+- int structType;
++ int structType;
+ __u32 objectId;
+ __u32 parentId;
+- int chunkId;
+-
++ int hdrChunk;
+ yaffs_ObjectType variantType:3;
+ __u8 deleted:1;
+ __u8 softDeleted:1;
+@@ -511,8 +516,7 @@ typedef struct {
+
+ int nDataChunks;
+ __u32 fileSizeOrEquivalentObjectId;
+-
+-}yaffs_CheckpointObject;
++} yaffs_CheckpointObject;
+
+ /*--------------------- Temporary buffers ----------------
+ *
+@@ -528,13 +532,13 @@ typedef struct {
+ /*----------------- Device ---------------------------------*/
+
+ struct yaffs_DeviceStruct {
+- struct list_head devList;
++ struct ylist_head devList;
+ const char *name;
+
+ /* Entry parameters set up way early. Yaffs sets up the rest.*/
+ int nDataBytesPerChunk; /* Should be a power of 2 >= 512 */
+ int nChunksPerBlock; /* does not need to be a power of 2 */
+- int nBytesPerSpare; /* spare area size */
++ int spareBytesPerChunk; /* spare area size */
+ int startBlock; /* Start block we're allowed to use */
+ int endBlock; /* End block we're allowed to use */
+ int nReservedBlocks; /* We want this tuneable so that we can reduce */
+@@ -544,9 +548,7 @@ struct yaffs_DeviceStruct {
+ /* Stuff used by the shared space checkpointing mechanism */
+ /* If this value is zero, then this mechanism is disabled */
+
+- int nCheckpointReservedBlocks; /* Blocks to reserve for checkpoint data */
+-
+-
++/* int nCheckpointReservedBlocks; */ /* Blocks to reserve for checkpoint data */
+
+
+ int nShortOpCaches; /* If <= 0, then short op caching is disabled, else
+@@ -560,30 +562,31 @@ struct yaffs_DeviceStruct {
+ void *genericDevice; /* Pointer to device context
+ * On an mtd this holds the mtd pointer.
+ */
+- void *superBlock;
++ void *superBlock;
+
+ /* NAND access functions (Must be set before calling YAFFS)*/
+
+- int (*writeChunkToNAND) (struct yaffs_DeviceStruct * dev,
+- int chunkInNAND, const __u8 * data,
+- const yaffs_Spare * spare);
+- int (*readChunkFromNAND) (struct yaffs_DeviceStruct * dev,
+- int chunkInNAND, __u8 * data,
+- yaffs_Spare * spare);
+- int (*eraseBlockInNAND) (struct yaffs_DeviceStruct * dev,
+- int blockInNAND);
+- int (*initialiseNAND) (struct yaffs_DeviceStruct * dev);
++ int (*writeChunkToNAND) (struct yaffs_DeviceStruct *dev,
++ int chunkInNAND, const __u8 *data,
++ const yaffs_Spare *spare);
++ int (*readChunkFromNAND) (struct yaffs_DeviceStruct *dev,
++ int chunkInNAND, __u8 *data,
++ yaffs_Spare *spare);
++ int (*eraseBlockInNAND) (struct yaffs_DeviceStruct *dev,
++ int blockInNAND);
++ int (*initialiseNAND) (struct yaffs_DeviceStruct *dev);
++ int (*deinitialiseNAND) (struct yaffs_DeviceStruct *dev);
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+- int (*writeChunkWithTagsToNAND) (struct yaffs_DeviceStruct * dev,
+- int chunkInNAND, const __u8 * data,
+- const yaffs_ExtendedTags * tags);
+- int (*readChunkWithTagsFromNAND) (struct yaffs_DeviceStruct * dev,
+- int chunkInNAND, __u8 * data,
+- yaffs_ExtendedTags * tags);
+- int (*markNANDBlockBad) (struct yaffs_DeviceStruct * dev, int blockNo);
+- int (*queryNANDBlock) (struct yaffs_DeviceStruct * dev, int blockNo,
+- yaffs_BlockState * state, int *sequenceNumber);
++ int (*writeChunkWithTagsToNAND) (struct yaffs_DeviceStruct *dev,
++ int chunkInNAND, const __u8 *data,
++ const yaffs_ExtendedTags *tags);
++ int (*readChunkWithTagsFromNAND) (struct yaffs_DeviceStruct *dev,
++ int chunkInNAND, __u8 *data,
++ yaffs_ExtendedTags *tags);
++ int (*markNANDBlockBad) (struct yaffs_DeviceStruct *dev, int blockNo);
++ int (*queryNANDBlock) (struct yaffs_DeviceStruct *dev, int blockNo,
++ yaffs_BlockState *state, __u32 *sequenceNumber);
+ #endif
+
+ int isYaffs2;
+@@ -595,10 +598,12 @@ struct yaffs_DeviceStruct {
+ void (*removeObjectCallback)(struct yaffs_ObjectStruct *obj);
+
+ /* Callback to mark the superblock dirsty */
+- void (*markSuperBlockDirty)(void * superblock);
++ void (*markSuperBlockDirty)(void *superblock);
+
+ int wideTnodesDisabled; /* Set to disable wide tnodes */
+
++ YCHAR *pathDividers; /* String of legal path dividers */
++
+
+ /* End of stuff that must be set before initialisation. */
+
+@@ -615,16 +620,14 @@ struct yaffs_DeviceStruct {
+ __u32 tnodeWidth;
+ __u32 tnodeMask;
+
+- /* Stuff to support various file offses to chunk/offset translations */
+- /* "Crumbs" for nDataBytesPerChunk not being a power of 2 */
+- __u32 crumbMask;
+- __u32 crumbShift;
+- __u32 crumbsPerChunk;
+-
+- /* Straight shifting for nDataBytesPerChunk being a power of 2 */
+- __u32 chunkShift;
+- __u32 chunkMask;
+-
++ /* Stuff for figuring out file offset to chunk conversions */
++ __u32 chunkShift; /* Shift value */
++ __u32 chunkDiv; /* Divisor after shifting: 1 for power-of-2 sizes */
++ __u32 chunkMask; /* Mask to use for power-of-2 case */
++
++ /* Stuff to handle inband tags */
++ int inbandTags;
++ __u32 totalBytesPerChunk;
+
+ #ifdef __KERNEL__
+
+@@ -633,7 +636,7 @@ struct yaffs_DeviceStruct {
+ __u8 *spareBuffer; /* For mtdif2 use. Don't know the size of the buffer
+ * at compile time so we have to allocate it.
+ */
+- void (*putSuperFunc) (struct super_block * sb);
++ void (*putSuperFunc) (struct super_block *sb);
+ #endif
+
+ int isMounted;
+@@ -663,6 +666,8 @@ struct yaffs_DeviceStruct {
+ __u32 checkpointSum;
+ __u32 checkpointXor;
+
++ int nCheckpointBlocksRequired; /* Number of blocks needed to store current checkpoint set */
++
+ /* Block Info */
+ yaffs_BlockInfo *blockInfo;
+ __u8 *chunkBits; /* bitmap of chunks in use */
+@@ -684,11 +689,15 @@ struct yaffs_DeviceStruct {
+ yaffs_TnodeList *allocatedTnodeList;
+
+ int isDoingGC;
++ int gcBlock;
++ int gcChunk;
+
+ int nObjectsCreated;
+ yaffs_Object *freeObjects;
+ int nFreeObjects;
+
++ int nHardLinks;
++
+ yaffs_ObjectList *allocatedObjectList;
+
+ yaffs_ObjectBucket objectBucket[YAFFS_NOBJECT_BUCKETS];
+@@ -745,8 +754,10 @@ struct yaffs_DeviceStruct {
+ int nBackgroundDeletions; /* Count of background deletions. */
+
+
++ /* Temporary buffer management */
+ yaffs_TempBuffer tempBuffer[YAFFS_N_TEMP_BUFFERS];
+ int maxTemp;
++ int tempInUse;
+ int unmanagedTempAllocations;
+ int unmanagedTempDeallocations;
+
+@@ -758,9 +769,9 @@ struct yaffs_DeviceStruct {
+
+ typedef struct yaffs_DeviceStruct yaffs_Device;
+
+-/* The static layout of bllock usage etc is stored in the super block header */
++/* The static layout of block usage etc is stored in the super block header */
+ typedef struct {
+- int StructType;
++ int StructType;
+ int version;
+ int checkpointStartBlock;
+ int checkpointEndBlock;
+@@ -773,7 +784,7 @@ typedef struct {
+ * must be preserved over unmount/mount cycles.
+ */
+ typedef struct {
+- int structType;
++ int structType;
+ int nErasedBlocks;
+ int allocationBlock; /* Current block being allocated off */
+ __u32 allocationPage;
+@@ -791,57 +802,45 @@ typedef struct {
+
+
+ typedef struct {
+- int structType;
+- __u32 magic;
+- __u32 version;
+- __u32 head;
++ int structType;
++ __u32 magic;
++ __u32 version;
++ __u32 head;
+ } yaffs_CheckpointValidity;
+
+-/* Function to manipulate block info */
+-static Y_INLINE yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blk)
+-{
+- if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("**>> yaffs: getBlockInfo block %d is not valid" TENDSTR),
+- blk));
+- YBUG();
+- }
+- return &dev->blockInfo[blk - dev->internalStartBlock];
+-}
+
+ /*----------------------- YAFFS Functions -----------------------*/
+
+-int yaffs_GutsInitialise(yaffs_Device * dev);
+-void yaffs_Deinitialise(yaffs_Device * dev);
++int yaffs_GutsInitialise(yaffs_Device *dev);
++void yaffs_Deinitialise(yaffs_Device *dev);
+
+-int yaffs_GetNumberOfFreeChunks(yaffs_Device * dev);
++int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev);
+
+-int yaffs_RenameObject(yaffs_Object * oldDir, const YCHAR * oldName,
+- yaffs_Object * newDir, const YCHAR * newName);
++int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName,
++ yaffs_Object *newDir, const YCHAR *newName);
+
+-int yaffs_Unlink(yaffs_Object * dir, const YCHAR * name);
+-int yaffs_DeleteFile(yaffs_Object * obj);
++int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name);
++int yaffs_DeleteObject(yaffs_Object *obj);
+
+-int yaffs_GetObjectName(yaffs_Object * obj, YCHAR * name, int buffSize);
+-int yaffs_GetObjectFileLength(yaffs_Object * obj);
+-int yaffs_GetObjectInode(yaffs_Object * obj);
+-unsigned yaffs_GetObjectType(yaffs_Object * obj);
+-int yaffs_GetObjectLinkCount(yaffs_Object * obj);
++int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize);
++int yaffs_GetObjectFileLength(yaffs_Object *obj);
++int yaffs_GetObjectInode(yaffs_Object *obj);
++unsigned yaffs_GetObjectType(yaffs_Object *obj);
++int yaffs_GetObjectLinkCount(yaffs_Object *obj);
+
+-int yaffs_SetAttributes(yaffs_Object * obj, struct iattr *attr);
+-int yaffs_GetAttributes(yaffs_Object * obj, struct iattr *attr);
++int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr);
++int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr);
+
+ /* File operations */
+-int yaffs_ReadDataFromFile(yaffs_Object * obj, __u8 * buffer, loff_t offset,
+- int nBytes);
+-int yaffs_WriteDataToFile(yaffs_Object * obj, const __u8 * buffer, loff_t offset,
+- int nBytes, int writeThrough);
+-int yaffs_ResizeFile(yaffs_Object * obj, loff_t newSize);
+-
+-yaffs_Object *yaffs_MknodFile(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid);
+-int yaffs_FlushFile(yaffs_Object * obj, int updateTime);
++int yaffs_ReadDataFromFile(yaffs_Object *obj, __u8 *buffer, loff_t offset,
++ int nBytes);
++int yaffs_WriteDataToFile(yaffs_Object *obj, const __u8 *buffer, loff_t offset,
++ int nBytes, int writeThrough);
++int yaffs_ResizeFile(yaffs_Object *obj, loff_t newSize);
++
++yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid);
++int yaffs_FlushFile(yaffs_Object *obj, int updateTime);
+
+ /* Flushing and checkpointing */
+ void yaffs_FlushEntireDeviceCache(yaffs_Device *dev);
+@@ -850,33 +849,33 @@ int yaffs_CheckpointSave(yaffs_Device *d
+ int yaffs_CheckpointRestore(yaffs_Device *dev);
+
+ /* Directory operations */
+-yaffs_Object *yaffs_MknodDirectory(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid);
+-yaffs_Object *yaffs_FindObjectByName(yaffs_Object * theDir, const YCHAR * name);
+-int yaffs_ApplyToDirectoryChildren(yaffs_Object * theDir,
++yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid);
++yaffs_Object *yaffs_FindObjectByName(yaffs_Object *theDir, const YCHAR *name);
++int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir,
+ int (*fn) (yaffs_Object *));
+
+-yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device * dev, __u32 number);
++yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number);
+
+ /* Link operations */
+-yaffs_Object *yaffs_Link(yaffs_Object * parent, const YCHAR * name,
+- yaffs_Object * equivalentObject);
++yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name,
++ yaffs_Object *equivalentObject);
+
+-yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object * obj);
++yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj);
+
+ /* Symlink operations */
+-yaffs_Object *yaffs_MknodSymLink(yaffs_Object * parent, const YCHAR * name,
++yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid,
+- const YCHAR * alias);
+-YCHAR *yaffs_GetSymlinkAlias(yaffs_Object * obj);
++ const YCHAR *alias);
++YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj);
+
+ /* Special inodes (fifos, sockets and devices) */
+-yaffs_Object *yaffs_MknodSpecial(yaffs_Object * parent, const YCHAR * name,
++yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid, __u32 rdev);
+
+ /* Special directories */
+-yaffs_Object *yaffs_Root(yaffs_Device * dev);
+-yaffs_Object *yaffs_LostNFound(yaffs_Device * dev);
++yaffs_Object *yaffs_Root(yaffs_Device *dev);
++yaffs_Object *yaffs_LostNFound(yaffs_Device *dev);
+
+ #ifdef CONFIG_YAFFS_WINCE
+ /* CONFIG_YAFFS_WINCE special stuff */
+@@ -885,18 +884,21 @@ void yfsd_WinFileTimeNow(__u32 target[2]
+
+ #ifdef __KERNEL__
+
+-void yaffs_HandleDeferedFree(yaffs_Object * obj);
++void yaffs_HandleDeferedFree(yaffs_Object *obj);
+ #endif
+
+ /* Debug dump */
+-int yaffs_DumpObject(yaffs_Object * obj);
++int yaffs_DumpObject(yaffs_Object *obj);
+
+-void yaffs_GutsTest(yaffs_Device * dev);
++void yaffs_GutsTest(yaffs_Device *dev);
+
+ /* A few useful functions */
+-void yaffs_InitialiseTags(yaffs_ExtendedTags * tags);
+-void yaffs_DeleteChunk(yaffs_Device * dev, int chunkId, int markNAND, int lyn);
+-int yaffs_CheckFF(__u8 * buffer, int nBytes);
++void yaffs_InitialiseTags(yaffs_ExtendedTags *tags);
++void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn);
++int yaffs_CheckFF(__u8 *buffer, int nBytes);
+ void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi);
+
++__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo);
++void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer, int lineNo);
++
+ #endif
+--- a/fs/yaffs2/yaffs_mtdif1.c
++++ b/fs/yaffs2/yaffs_mtdif1.c
+@@ -26,7 +26,7 @@
+ #include "yportenv.h"
+ #include "yaffs_guts.h"
+ #include "yaffs_packedtags1.h"
+-#include "yaffs_tagscompat.h" // for yaffs_CalcTagsECC
++#include "yaffs_tagscompat.h" /* for yaffs_CalcTagsECC */
+
+ #include "linux/kernel.h"
+ #include "linux/version.h"
+@@ -34,9 +34,9 @@
+ #include "linux/mtd/mtd.h"
+
+ /* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+
+-const char *yaffs_mtdif1_c_version = "$Id: yaffs_mtdif1.c,v 1.3 2007/05/15 20:16:11 ian Exp $";
++const char *yaffs_mtdif1_c_version = "$Id: yaffs_mtdif1.c,v 1.10 2009-03-09 07:41:10 charles Exp $";
+
+ #ifndef CONFIG_YAFFS_9BYTE_TAGS
+ # define YTAG1_SIZE 8
+@@ -89,9 +89,9 @@ static struct nand_ecclayout nand_oob_16
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+ int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND, const __u8 * data, const yaffs_ExtendedTags * etags)
++ int chunkInNAND, const __u8 *data, const yaffs_ExtendedTags *etags)
+ {
+- struct mtd_info * mtd = dev->genericDevice;
++ struct mtd_info *mtd = dev->genericDevice;
+ int chunkBytes = dev->nDataBytesPerChunk;
+ loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+ struct mtd_oob_ops ops;
+@@ -146,7 +146,7 @@ int nandmtd1_WriteChunkWithTagsToNAND(ya
+
+ /* Return with empty ExtendedTags but add eccResult.
+ */
+-static int rettags(yaffs_ExtendedTags * etags, int eccResult, int retval)
++static int rettags(yaffs_ExtendedTags *etags, int eccResult, int retval)
+ {
+ if (etags) {
+ memset(etags, 0, sizeof(*etags));
+@@ -169,9 +169,9 @@ static int rettags(yaffs_ExtendedTags *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+ int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev,
+- int chunkInNAND, __u8 * data, yaffs_ExtendedTags * etags)
++ int chunkInNAND, __u8 *data, yaffs_ExtendedTags *etags)
+ {
+- struct mtd_info * mtd = dev->genericDevice;
++ struct mtd_info *mtd = dev->genericDevice;
+ int chunkBytes = dev->nDataBytesPerChunk;
+ loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+ int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+@@ -189,7 +189,7 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ ops.datbuf = data;
+ ops.oobbuf = (__u8 *)&pt1;
+
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20))
+ /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
+ * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
+ */
+@@ -284,11 +284,11 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ */
+ int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
+ {
+- struct mtd_info * mtd = dev->genericDevice;
++ struct mtd_info *mtd = dev->genericDevice;
+ int blocksize = dev->nChunksPerBlock * dev->nDataBytesPerChunk;
+ int retval;
+
+- yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", blockNo);
++ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad\n", blockNo);
+
+ retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+@@ -298,7 +298,7 @@ int nandmtd1_MarkNANDBlockBad(struct yaf
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+-static int nandmtd1_TestPrerequists(struct mtd_info * mtd)
++static int nandmtd1_TestPrerequists(struct mtd_info *mtd)
+ {
+ /* 2.6.18 has mtd->ecclayout->oobavail */
+ /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
+@@ -323,10 +323,11 @@ static int nandmtd1_TestPrerequists(stru
+ * Always returns YAFFS_OK.
+ */
+ int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * pState, int *pSequenceNumber)
++ yaffs_BlockState *pState, __u32 *pSequenceNumber)
+ {
+- struct mtd_info * mtd = dev->genericDevice;
++ struct mtd_info *mtd = dev->genericDevice;
+ int chunkNo = blockNo * dev->nChunksPerBlock;
++ loff_t addr = (loff_t)chunkNo * dev->nDataBytesPerChunk;
+ yaffs_ExtendedTags etags;
+ int state = YAFFS_BLOCK_STATE_DEAD;
+ int seqnum = 0;
+@@ -335,21 +336,22 @@ int nandmtd1_QueryNANDBlock(struct yaffs
+ /* We don't yet have a good place to test for MTD config prerequists.
+ * Do it here as we are called during the initial scan.
+ */
+- if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK) {
++ if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK)
+ return YAFFS_FAIL;
+- }
+
+ retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
++ etags.blockBad = (mtd->block_isbad)(mtd, addr);
+ if (etags.blockBad) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+- "block %d is marked bad", blockNo);
++ "block %d is marked bad\n", blockNo);
+ state = YAFFS_BLOCK_STATE_DEAD;
+- }
+- else if (etags.chunkUsed) {
++ } else if (etags.eccResult != YAFFS_ECC_RESULT_NO_ERROR) {
++ /* bad tags, need to look more closely */
++ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
++ } else if (etags.chunkUsed) {
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ seqnum = etags.sequenceNumber;
+- }
+- else {
++ } else {
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+
+@@ -360,4 +362,4 @@ int nandmtd1_QueryNANDBlock(struct yaffs
+ return YAFFS_OK;
+ }
+
+-#endif /*KERNEL_VERSION*/
++#endif /*MTD_VERSION*/
+--- a/fs/yaffs2/yaffs_mtdif1.h
++++ b/fs/yaffs2/yaffs_mtdif1.h
+@@ -14,15 +14,15 @@
+ #ifndef __YAFFS_MTDIF1_H__
+ #define __YAFFS_MTDIF1_H__
+
+-int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data, const yaffs_ExtendedTags * tags);
++int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data, const yaffs_ExtendedTags *tags);
+
+-int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+- __u8 * data, yaffs_ExtendedTags * tags);
++int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++ __u8 *data, yaffs_ExtendedTags *tags);
+
+ int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+
+ int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * state, int *sequenceNumber);
++ yaffs_BlockState *state, __u32 *sequenceNumber);
+
+ #endif
+--- a/fs/yaffs2/yaffs_mtdif2.c
++++ b/fs/yaffs2/yaffs_mtdif2.c
+@@ -14,7 +14,7 @@
+ /* mtd interface for YAFFS2 */
+
+ const char *yaffs_mtdif2_c_version =
+- "$Id: yaffs_mtdif2.c,v 1.17 2007-02-14 01:09:06 wookey Exp $";
++ "$Id: yaffs_mtdif2.c,v 1.23 2009-03-06 17:20:53 wookey Exp $";
+
+ #include "yportenv.h"
+
+@@ -27,19 +27,23 @@ const char *yaffs_mtdif2_c_version =
+
+ #include "yaffs_packedtags2.h"
+
+-int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags * tags)
++/* NB For use with inband tags....
++ * We assume that the data buffer is of size totalBytersPerChunk so that we can also
++ * use it to load the tags.
++ */
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *tags)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #else
+ size_t dummy;
+ #endif
+ int retval = 0;
+
+- loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
++ loff_t addr;
+
+ yaffs_PackedTags2 pt;
+
+@@ -48,46 +52,40 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
+ ("nandmtd2_WriteChunkWithTagsToNAND chunk %d data %p tags %p"
+ TENDSTR), chunkInNAND, data, tags));
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- if (tags)
+- yaffs_PackTags2(&pt, tags);
+- else
+- BUG(); /* both tags and data should always be present */
+
+- if (data) {
+- ops.mode = MTD_OOB_AUTO;
+- ops.ooblen = sizeof(pt);
+- ops.len = dev->nDataBytesPerChunk;
+- ops.ooboffs = 0;
+- ops.datbuf = (__u8 *)data;
+- ops.oobbuf = (void *)&pt;
+- retval = mtd->write_oob(mtd, addr, &ops);
++ addr = ((loff_t) chunkInNAND) * dev->totalBytesPerChunk;
++
++ /* For yaffs2 writing there must be both data and tags.
++ * If we're using inband tags, then the tags are stuffed into
++ * the end of the data buffer.
++ */
++ if (!data || !tags)
++ BUG();
++ else if (dev->inbandTags) {
++ yaffs_PackedTags2TagsPart *pt2tp;
++ pt2tp = (yaffs_PackedTags2TagsPart *)(data + dev->nDataBytesPerChunk);
++ yaffs_PackTags2TagsPart(pt2tp, tags);
+ } else
+- BUG(); /* both tags and data should always be present */
+-#else
+- if (tags) {
+ yaffs_PackTags2(&pt, tags);
+- }
+
+- if (data && tags) {
+- if (dev->useNANDECC)
+- retval =
+- mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, (__u8 *) & pt, NULL);
+- else
+- retval =
+- mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, (__u8 *) & pt, NULL);
+- } else {
+- if (data)
+- retval =
+- mtd->write(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+- data);
+- if (tags)
+- retval =
+- mtd->write_oob(mtd, addr, mtd->oobsize, &dummy,
+- (__u8 *) & pt);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ ops.mode = MTD_OOB_AUTO;
++ ops.ooblen = (dev->inbandTags) ? 0 : sizeof(pt);
++ ops.len = dev->totalBytesPerChunk;
++ ops.ooboffs = 0;
++ ops.datbuf = (__u8 *)data;
++ ops.oobbuf = (dev->inbandTags) ? NULL : (void *)&pt;
++ retval = mtd->write_oob(mtd, addr, &ops);
+
++#else
++ if (!dev->inbandTags) {
++ retval =
++ mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
++ &dummy, data, (__u8 *) &pt, NULL);
++ } else {
++ retval =
++ mtd->write(mtd, addr, dev->totalBytesPerChunk, &dummy,
++ data);
+ }
+ #endif
+
+@@ -97,17 +95,18 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+- __u8 * data, yaffs_ExtendedTags * tags)
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++ __u8 *data, yaffs_ExtendedTags *tags)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #endif
+ size_t dummy;
+ int retval = 0;
++ int localData = 0;
+
+- loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
++ loff_t addr = ((loff_t) chunkInNAND) * dev->totalBytesPerChunk;
+
+ yaffs_PackedTags2 pt;
+
+@@ -116,9 +115,20 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
+ ("nandmtd2_ReadChunkWithTagsFromNAND chunk %d data %p tags %p"
+ TENDSTR), chunkInNAND, data, tags));
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- if (data && !tags)
+- retval = mtd->read(mtd, addr, dev->nDataBytesPerChunk,
++ if (dev->inbandTags) {
++
++ if (!data) {
++ localData = 1;
++ data = yaffs_GetTempBuffer(dev, __LINE__);
++ }
++
++
++ }
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ if (dev->inbandTags || (data && !tags))
++ retval = mtd->read(mtd, addr, dev->totalBytesPerChunk,
+ &dummy, data);
+ else if (tags) {
+ ops.mode = MTD_OOB_AUTO;
+@@ -130,38 +140,42 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
+ retval = mtd->read_oob(mtd, addr, &ops);
+ }
+ #else
+- if (data && tags) {
+- if (dev->useNANDECC) {
+- retval =
+- mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, dev->spareBuffer,
+- NULL);
+- } else {
+- retval =
+- mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
++ if (!dev->inbandTags && data && tags) {
++
++ retval = mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data, dev->spareBuffer,
+ NULL);
+- }
+ } else {
+ if (data)
+ retval =
+ mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+ data);
+- if (tags)
++ if (!dev->inbandTags && tags)
+ retval =
+ mtd->read_oob(mtd, addr, mtd->oobsize, &dummy,
+ dev->spareBuffer);
+ }
+ #endif
+
+- memcpy(&pt, dev->spareBuffer, sizeof(pt));
+
+- if (tags)
+- yaffs_UnpackTags2(tags, &pt);
++ if (dev->inbandTags) {
++ if (tags) {
++ yaffs_PackedTags2TagsPart *pt2tp;
++ pt2tp = (yaffs_PackedTags2TagsPart *)&data[dev->nDataBytesPerChunk];
++ yaffs_UnpackTags2TagsPart(tags, pt2tp);
++ }
++ } else {
++ if (tags) {
++ memcpy(&pt, dev->spareBuffer, sizeof(pt));
++ yaffs_UnpackTags2(tags, &pt);
++ }
++ }
++
++ if (localData)
++ yaffs_ReleaseTempBuffer(dev, data, __LINE__);
+
+- if(tags && retval == -EBADMSG && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR)
++ if (tags && retval == -EBADMSG && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR)
+ tags->eccResult = YAFFS_ECC_RESULT_UNFIXED;
+-
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+@@ -178,7 +192,7 @@ int nandmtd2_MarkNANDBlockBad(struct yaf
+ retval =
+ mtd->block_markbad(mtd,
+ blockNo * dev->nChunksPerBlock *
+- dev->nDataBytesPerChunk);
++ dev->totalBytesPerChunk);
+
+ if (retval == 0)
+ return YAFFS_OK;
+@@ -188,7 +202,7 @@ int nandmtd2_MarkNANDBlockBad(struct yaf
+ }
+
+ int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * state, int *sequenceNumber)
++ yaffs_BlockState *state, __u32 *sequenceNumber)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+ int retval;
+@@ -198,7 +212,7 @@ int nandmtd2_QueryNANDBlock(struct yaffs
+ retval =
+ mtd->block_isbad(mtd,
+ blockNo * dev->nChunksPerBlock *
+- dev->nDataBytesPerChunk);
++ dev->totalBytesPerChunk);
+
+ if (retval) {
+ T(YAFFS_TRACE_MTD, (TSTR("block is bad" TENDSTR)));
+--- a/fs/yaffs2/yaffs_mtdif2.h
++++ b/fs/yaffs2/yaffs_mtdif2.h
+@@ -17,13 +17,13 @@
+ #define __YAFFS_MTDIF2_H__
+
+ #include "yaffs_guts.h"
+-int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags * tags);
+-int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+- __u8 * data, yaffs_ExtendedTags * tags);
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *tags);
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++ __u8 *data, yaffs_ExtendedTags *tags);
+ int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+ int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * state, int *sequenceNumber);
++ yaffs_BlockState *state, __u32 *sequenceNumber);
+
+ #endif
+--- a/fs/yaffs2/yaffs_mtdif.c
++++ b/fs/yaffs2/yaffs_mtdif.c
+@@ -12,7 +12,7 @@
+ */
+
+ const char *yaffs_mtdif_c_version =
+- "$Id: yaffs_mtdif.c,v 1.19 2007-02-14 01:09:06 wookey Exp $";
++ "$Id: yaffs_mtdif.c,v 1.22 2009-03-06 17:20:51 wookey Exp $";
+
+ #include "yportenv.h"
+
+@@ -24,7 +24,7 @@ const char *yaffs_mtdif_c_version =
+ #include "linux/time.h"
+ #include "linux/mtd/nand.h"
+
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 18))
+ static struct nand_oobinfo yaffs_oobinfo = {
+ .useecc = 1,
+ .eccbytes = 6,
+@@ -36,7 +36,7 @@ static struct nand_oobinfo yaffs_noeccin
+ };
+ #endif
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ static inline void translate_spare2oob(const yaffs_Spare *spare, __u8 *oob)
+ {
+ oob[0] = spare->tagByte0;
+@@ -45,8 +45,8 @@ static inline void translate_spare2oob(c
+ oob[3] = spare->tagByte3;
+ oob[4] = spare->tagByte4;
+ oob[5] = spare->tagByte5 & 0x3f;
+- oob[5] |= spare->blockStatus == 'Y' ? 0: 0x80;
+- oob[5] |= spare->pageStatus == 0 ? 0: 0x40;
++ oob[5] |= spare->blockStatus == 'Y' ? 0 : 0x80;
++ oob[5] |= spare->pageStatus == 0 ? 0 : 0x40;
+ oob[6] = spare->tagByte6;
+ oob[7] = spare->tagByte7;
+ }
+@@ -71,18 +71,18 @@ static inline void translate_oob2spare(y
+ }
+ #endif
+
+-int nandmtd_WriteChunkToNAND(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data, const yaffs_Spare * spare)
++int nandmtd_WriteChunkToNAND(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data, const yaffs_Spare *spare)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #endif
+ size_t dummy;
+ int retval = 0;
+
+ loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ __u8 spareAsBytes[8]; /* OOB */
+
+ if (data && !spare)
+@@ -135,18 +135,18 @@ int nandmtd_WriteChunkToNAND(yaffs_Devic
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd_ReadChunkFromNAND(yaffs_Device * dev, int chunkInNAND, __u8 * data,
+- yaffs_Spare * spare)
++int nandmtd_ReadChunkFromNAND(yaffs_Device *dev, int chunkInNAND, __u8 *data,
++ yaffs_Spare *spare)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #endif
+ size_t dummy;
+ int retval = 0;
+
+ loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ __u8 spareAsBytes[8]; /* OOB */
+
+ if (data && !spare)
+@@ -205,7 +205,7 @@ int nandmtd_ReadChunkFromNAND(yaffs_Devi
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd_EraseBlockInNAND(yaffs_Device * dev, int blockNumber)
++int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+ __u32 addr =
+@@ -234,7 +234,7 @@ int nandmtd_EraseBlockInNAND(yaffs_Devic
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd_InitialiseNAND(yaffs_Device * dev)
++int nandmtd_InitialiseNAND(yaffs_Device *dev)
+ {
+ return YAFFS_OK;
+ }
+--- a/fs/yaffs2/yaffs_mtdif.h
++++ b/fs/yaffs2/yaffs_mtdif.h
+@@ -18,10 +18,15 @@
+
+ #include "yaffs_guts.h"
+
+-int nandmtd_WriteChunkToNAND(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data, const yaffs_Spare * spare);
+-int nandmtd_ReadChunkFromNAND(yaffs_Device * dev, int chunkInNAND, __u8 * data,
+- yaffs_Spare * spare);
+-int nandmtd_EraseBlockInNAND(yaffs_Device * dev, int blockNumber);
+-int nandmtd_InitialiseNAND(yaffs_Device * dev);
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 18))
++extern struct nand_oobinfo yaffs_oobinfo;
++extern struct nand_oobinfo yaffs_noeccinfo;
++#endif
++
++int nandmtd_WriteChunkToNAND(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data, const yaffs_Spare *spare);
++int nandmtd_ReadChunkFromNAND(yaffs_Device *dev, int chunkInNAND, __u8 *data,
++ yaffs_Spare *spare);
++int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber);
++int nandmtd_InitialiseNAND(yaffs_Device *dev);
+ #endif
+--- a/fs/yaffs2/yaffs_nand.c
++++ b/fs/yaffs2/yaffs_nand.c
+@@ -12,16 +12,17 @@
+ */
+
+ const char *yaffs_nand_c_version =
+- "$Id: yaffs_nand.c,v 1.7 2007-02-14 01:09:06 wookey Exp $";
++ "$Id: yaffs_nand.c,v 1.10 2009-03-06 17:20:54 wookey Exp $";
+
+ #include "yaffs_nand.h"
+ #include "yaffs_tagscompat.h"
+ #include "yaffs_tagsvalidity.h"
+
++#include "yaffs_getblockinfo.h"
+
+-int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+- __u8 * buffer,
+- yaffs_ExtendedTags * tags)
++int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++ __u8 *buffer,
++ yaffs_ExtendedTags *tags)
+ {
+ int result;
+ yaffs_ExtendedTags localTags;
+@@ -29,7 +30,7 @@ int yaffs_ReadChunkWithTagsFromNAND(yaff
+ int realignedChunkInNAND = chunkInNAND - dev->chunkOffset;
+
+ /* If there are no tags provided, use local tags to get prioritised gc working */
+- if(!tags)
++ if (!tags)
+ tags = &localTags;
+
+ if (dev->readChunkWithTagsFromNAND)
+@@ -40,20 +41,20 @@ int yaffs_ReadChunkWithTagsFromNAND(yaff
+ realignedChunkInNAND,
+ buffer,
+ tags);
+- if(tags &&
+- tags->eccResult > YAFFS_ECC_RESULT_NO_ERROR){
++ if (tags &&
++ tags->eccResult > YAFFS_ECC_RESULT_NO_ERROR) {
+
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, chunkInNAND/dev->nChunksPerBlock);
+- yaffs_HandleChunkError(dev,bi);
++ yaffs_HandleChunkError(dev, bi);
+ }
+
+ return result;
+ }
+
+-int yaffs_WriteChunkWithTagsToNAND(yaffs_Device * dev,
++int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+ int chunkInNAND,
+- const __u8 * buffer,
+- yaffs_ExtendedTags * tags)
++ const __u8 *buffer,
++ yaffs_ExtendedTags *tags)
+ {
+ chunkInNAND -= dev->chunkOffset;
+
+@@ -84,7 +85,7 @@ int yaffs_WriteChunkWithTagsToNAND(yaffs
+ tags);
+ }
+
+-int yaffs_MarkBlockBad(yaffs_Device * dev, int blockNo)
++int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo)
+ {
+ blockNo -= dev->blockOffset;
+
+@@ -95,10 +96,10 @@ int yaffs_MarkBlockBad(yaffs_Device * de
+ return yaffs_TagsCompatabilityMarkNANDBlockBad(dev, blockNo);
+ }
+
+-int yaffs_QueryInitialBlockState(yaffs_Device * dev,
++int yaffs_QueryInitialBlockState(yaffs_Device *dev,
+ int blockNo,
+- yaffs_BlockState * state,
+- unsigned *sequenceNumber)
++ yaffs_BlockState *state,
++ __u32 *sequenceNumber)
+ {
+ blockNo -= dev->blockOffset;
+
+--- a/fs/yaffs2/yaffs_nandemul2k.h
++++ b/fs/yaffs2/yaffs_nandemul2k.h
+@@ -21,14 +21,14 @@
+ #include "yaffs_guts.h"
+
+ int nandemul2k_WriteChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 * data,
+- yaffs_ExtendedTags * tags);
++ int chunkInNAND, const __u8 *data,
++ const yaffs_ExtendedTags *tags);
+ int nandemul2k_ReadChunkWithTagsFromNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, __u8 * data,
+- yaffs_ExtendedTags * tags);
++ int chunkInNAND, __u8 *data,
++ yaffs_ExtendedTags *tags);
+ int nandemul2k_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+ int nandemul2k_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * state, int *sequenceNumber);
++ yaffs_BlockState *state, __u32 *sequenceNumber);
+ int nandemul2k_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+ int blockInNAND);
+ int nandemul2k_InitialiseNAND(struct yaffs_DeviceStruct *dev);
+--- a/fs/yaffs2/yaffs_nand.h
++++ b/fs/yaffs2/yaffs_nand.h
+@@ -19,21 +19,21 @@
+
+
+
+-int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+- __u8 * buffer,
+- yaffs_ExtendedTags * tags);
+-
+-int yaffs_WriteChunkWithTagsToNAND(yaffs_Device * dev,
+- int chunkInNAND,
+- const __u8 * buffer,
+- yaffs_ExtendedTags * tags);
+-
+-int yaffs_MarkBlockBad(yaffs_Device * dev, int blockNo);
+-
+-int yaffs_QueryInitialBlockState(yaffs_Device * dev,
+- int blockNo,
+- yaffs_BlockState * state,
+- unsigned *sequenceNumber);
++int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++ __u8 *buffer,
++ yaffs_ExtendedTags *tags);
++
++int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev,
++ int chunkInNAND,
++ const __u8 *buffer,
++ yaffs_ExtendedTags *tags);
++
++int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo);
++
++int yaffs_QueryInitialBlockState(yaffs_Device *dev,
++ int blockNo,
++ yaffs_BlockState *state,
++ unsigned *sequenceNumber);
+
+ int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+ int blockInNAND);
+--- a/fs/yaffs2/yaffs_packedtags1.c
++++ b/fs/yaffs2/yaffs_packedtags1.c
+@@ -14,7 +14,7 @@
+ #include "yaffs_packedtags1.h"
+ #include "yportenv.h"
+
+-void yaffs_PackTags1(yaffs_PackedTags1 * pt, const yaffs_ExtendedTags * t)
++void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t)
+ {
+ pt->chunkId = t->chunkId;
+ pt->serialNumber = t->serialNumber;
+@@ -27,7 +27,7 @@ void yaffs_PackTags1(yaffs_PackedTags1 *
+
+ }
+
+-void yaffs_UnpackTags1(yaffs_ExtendedTags * t, const yaffs_PackedTags1 * pt)
++void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt)
+ {
+ static const __u8 allFF[] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+@@ -35,9 +35,8 @@ void yaffs_UnpackTags1(yaffs_ExtendedTag
+
+ if (memcmp(allFF, pt, sizeof(yaffs_PackedTags1))) {
+ t->blockBad = 0;
+- if (pt->shouldBeFF != 0xFFFFFFFF) {
++ if (pt->shouldBeFF != 0xFFFFFFFF)
+ t->blockBad = 1;
+- }
+ t->chunkUsed = 1;
+ t->objectId = pt->objectId;
+ t->chunkId = pt->chunkId;
+@@ -47,6 +46,5 @@ void yaffs_UnpackTags1(yaffs_ExtendedTag
+ t->serialNumber = pt->serialNumber;
+ } else {
+ memset(t, 0, sizeof(yaffs_ExtendedTags));
+-
+ }
+ }
+--- a/fs/yaffs2/yaffs_packedtags1.h
++++ b/fs/yaffs2/yaffs_packedtags1.h
+@@ -32,6 +32,6 @@ typedef struct {
+
+ } yaffs_PackedTags1;
+
+-void yaffs_PackTags1(yaffs_PackedTags1 * pt, const yaffs_ExtendedTags * t);
+-void yaffs_UnpackTags1(yaffs_ExtendedTags * t, const yaffs_PackedTags1 * pt);
++void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t);
++void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt);
+ #endif
+--- a/fs/yaffs2/yaffs_packedtags2.c
++++ b/fs/yaffs2/yaffs_packedtags2.c
+@@ -37,60 +37,68 @@
+ #define EXTRA_OBJECT_TYPE_SHIFT (28)
+ #define EXTRA_OBJECT_TYPE_MASK ((0x0F) << EXTRA_OBJECT_TYPE_SHIFT)
+
+-static void yaffs_DumpPackedTags2(const yaffs_PackedTags2 * pt)
++
++static void yaffs_DumpPackedTags2TagsPart(const yaffs_PackedTags2TagsPart *ptt)
+ {
+ T(YAFFS_TRACE_MTD,
+ (TSTR("packed tags obj %d chunk %d byte %d seq %d" TENDSTR),
+- pt->t.objectId, pt->t.chunkId, pt->t.byteCount,
+- pt->t.sequenceNumber));
++ ptt->objectId, ptt->chunkId, ptt->byteCount,
++ ptt->sequenceNumber));
++}
++static void yaffs_DumpPackedTags2(const yaffs_PackedTags2 *pt)
++{
++ yaffs_DumpPackedTags2TagsPart(&pt->t);
+ }
+
+-static void yaffs_DumpTags2(const yaffs_ExtendedTags * t)
++static void yaffs_DumpTags2(const yaffs_ExtendedTags *t)
+ {
+ T(YAFFS_TRACE_MTD,
+ (TSTR
+- ("ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte "
+- "%d del %d ser %d seq %d"
++ ("ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d"
+ TENDSTR), t->eccResult, t->blockBad, t->chunkUsed, t->objectId,
+ t->chunkId, t->byteCount, t->chunkDeleted, t->serialNumber,
+ t->sequenceNumber));
+
+ }
+
+-void yaffs_PackTags2(yaffs_PackedTags2 * pt, const yaffs_ExtendedTags * t)
++void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *ptt,
++ const yaffs_ExtendedTags *t)
+ {
+- pt->t.chunkId = t->chunkId;
+- pt->t.sequenceNumber = t->sequenceNumber;
+- pt->t.byteCount = t->byteCount;
+- pt->t.objectId = t->objectId;
++ ptt->chunkId = t->chunkId;
++ ptt->sequenceNumber = t->sequenceNumber;
++ ptt->byteCount = t->byteCount;
++ ptt->objectId = t->objectId;
+
+ if (t->chunkId == 0 && t->extraHeaderInfoAvailable) {
+ /* Store the extra header info instead */
+ /* We save the parent object in the chunkId */
+- pt->t.chunkId = EXTRA_HEADER_INFO_FLAG
++ ptt->chunkId = EXTRA_HEADER_INFO_FLAG
+ | t->extraParentObjectId;
+- if (t->extraIsShrinkHeader) {
+- pt->t.chunkId |= EXTRA_SHRINK_FLAG;
+- }
+- if (t->extraShadows) {
+- pt->t.chunkId |= EXTRA_SHADOWS_FLAG;
+- }
++ if (t->extraIsShrinkHeader)
++ ptt->chunkId |= EXTRA_SHRINK_FLAG;
++ if (t->extraShadows)
++ ptt->chunkId |= EXTRA_SHADOWS_FLAG;
+
+- pt->t.objectId &= ~EXTRA_OBJECT_TYPE_MASK;
+- pt->t.objectId |=
++ ptt->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
++ ptt->objectId |=
+ (t->extraObjectType << EXTRA_OBJECT_TYPE_SHIFT);
+
+- if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK) {
+- pt->t.byteCount = t->extraEquivalentObjectId;
+- } else if (t->extraObjectType == YAFFS_OBJECT_TYPE_FILE) {
+- pt->t.byteCount = t->extraFileLength;
+- } else {
+- pt->t.byteCount = 0;
+- }
++ if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK)
++ ptt->byteCount = t->extraEquivalentObjectId;
++ else if (t->extraObjectType == YAFFS_OBJECT_TYPE_FILE)
++ ptt->byteCount = t->extraFileLength;
++ else
++ ptt->byteCount = 0;
+ }
+
+- yaffs_DumpPackedTags2(pt);
++ yaffs_DumpPackedTags2TagsPart(ptt);
+ yaffs_DumpTags2(t);
++}
++
++
++void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t)
++{
++ yaffs_PackTags2TagsPart(&pt->t, t);
+
+ #ifndef YAFFS_IGNORE_TAGS_ECC
+ {
+@@ -101,82 +109,98 @@ void yaffs_PackTags2(yaffs_PackedTags2 *
+ #endif
+ }
+
+-void yaffs_UnpackTags2(yaffs_ExtendedTags * t, yaffs_PackedTags2 * pt)
++
++void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t,
++ yaffs_PackedTags2TagsPart *ptt)
+ {
+
+ memset(t, 0, sizeof(yaffs_ExtendedTags));
+
+ yaffs_InitialiseTags(t);
+
+- if (pt->t.sequenceNumber != 0xFFFFFFFF) {
+- /* Page is in use */
+-#ifdef YAFFS_IGNORE_TAGS_ECC
+- {
+- t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
+- }
+-#else
+- {
+- yaffs_ECCOther ecc;
+- int result;
+- yaffs_ECCCalculateOther((unsigned char *)&pt->t,
+- sizeof
+- (yaffs_PackedTags2TagsPart),
+- &ecc);
+- result =
+- yaffs_ECCCorrectOther((unsigned char *)&pt->t,
+- sizeof
+- (yaffs_PackedTags2TagsPart),
+- &pt->ecc, &ecc);
+- switch(result){
+- case 0:
+- t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
+- break;
+- case 1:
+- t->eccResult = YAFFS_ECC_RESULT_FIXED;
+- break;
+- case -1:
+- t->eccResult = YAFFS_ECC_RESULT_UNFIXED;
+- break;
+- default:
+- t->eccResult = YAFFS_ECC_RESULT_UNKNOWN;
+- }
+- }
+-#endif
++ if (ptt->sequenceNumber != 0xFFFFFFFF) {
+ t->blockBad = 0;
+ t->chunkUsed = 1;
+- t->objectId = pt->t.objectId;
+- t->chunkId = pt->t.chunkId;
+- t->byteCount = pt->t.byteCount;
++ t->objectId = ptt->objectId;
++ t->chunkId = ptt->chunkId;
++ t->byteCount = ptt->byteCount;
+ t->chunkDeleted = 0;
+ t->serialNumber = 0;
+- t->sequenceNumber = pt->t.sequenceNumber;
++ t->sequenceNumber = ptt->sequenceNumber;
+
+ /* Do extra header info stuff */
+
+- if (pt->t.chunkId & EXTRA_HEADER_INFO_FLAG) {
++ if (ptt->chunkId & EXTRA_HEADER_INFO_FLAG) {
+ t->chunkId = 0;
+ t->byteCount = 0;
+
+ t->extraHeaderInfoAvailable = 1;
+ t->extraParentObjectId =
+- pt->t.chunkId & (~(ALL_EXTRA_FLAGS));
++ ptt->chunkId & (~(ALL_EXTRA_FLAGS));
+ t->extraIsShrinkHeader =
+- (pt->t.chunkId & EXTRA_SHRINK_FLAG) ? 1 : 0;
++ (ptt->chunkId & EXTRA_SHRINK_FLAG) ? 1 : 0;
+ t->extraShadows =
+- (pt->t.chunkId & EXTRA_SHADOWS_FLAG) ? 1 : 0;
++ (ptt->chunkId & EXTRA_SHADOWS_FLAG) ? 1 : 0;
+ t->extraObjectType =
+- pt->t.objectId >> EXTRA_OBJECT_TYPE_SHIFT;
++ ptt->objectId >> EXTRA_OBJECT_TYPE_SHIFT;
+ t->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
+
+- if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK) {
+- t->extraEquivalentObjectId = pt->t.byteCount;
+- } else {
+- t->extraFileLength = pt->t.byteCount;
++ if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK)
++ t->extraEquivalentObjectId = ptt->byteCount;
++ else
++ t->extraFileLength = ptt->byteCount;
++ }
++ }
++
++ yaffs_DumpPackedTags2TagsPart(ptt);
++ yaffs_DumpTags2(t);
++
++}
++
++
++void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt)
++{
++
++ yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++
++ if (pt->t.sequenceNumber != 0xFFFFFFFF) {
++ /* Page is in use */
++#ifndef YAFFS_IGNORE_TAGS_ECC
++ {
++ yaffs_ECCOther ecc;
++ int result;
++ yaffs_ECCCalculateOther((unsigned char *)&pt->t,
++ sizeof
++ (yaffs_PackedTags2TagsPart),
++ &ecc);
++ result =
++ yaffs_ECCCorrectOther((unsigned char *)&pt->t,
++ sizeof
++ (yaffs_PackedTags2TagsPart),
++ &pt->ecc, &ecc);
++ switch (result) {
++ case 0:
++ eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++ break;
++ case 1:
++ eccResult = YAFFS_ECC_RESULT_FIXED;
++ break;
++ case -1:
++ eccResult = YAFFS_ECC_RESULT_UNFIXED;
++ break;
++ default:
++ eccResult = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+ }
++#endif
+ }
+
++ yaffs_UnpackTags2TagsPart(t, &pt->t);
++
++ t->eccResult = eccResult;
++
+ yaffs_DumpPackedTags2(pt);
+ yaffs_DumpTags2(t);
+
+ }
++
+--- a/fs/yaffs2/yaffs_packedtags2.h
++++ b/fs/yaffs2/yaffs_packedtags2.h
+@@ -33,6 +33,11 @@ typedef struct {
+ yaffs_ECCOther ecc;
+ } yaffs_PackedTags2;
+
+-void yaffs_PackTags2(yaffs_PackedTags2 * pt, const yaffs_ExtendedTags * t);
+-void yaffs_UnpackTags2(yaffs_ExtendedTags * t, yaffs_PackedTags2 * pt);
++/* Full packed tags with ECC, used for oob tags */
++void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t);
++void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt);
++
++/* Only the tags part (no ECC for use with inband tags */
++void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *pt, const yaffs_ExtendedTags *t);
++void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t, yaffs_PackedTags2TagsPart *pt);
+ #endif
+--- a/fs/yaffs2/yaffs_qsort.c
++++ b/fs/yaffs2/yaffs_qsort.c
+@@ -28,12 +28,12 @@
+ */
+
+ #include "yportenv.h"
+-//#include <linux/string.h>
++/* #include <linux/string.h> */
+
+ /*
+ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
+ */
+-#define swapcode(TYPE, parmi, parmj, n) { \
++#define swapcode(TYPE, parmi, parmj, n) do { \
+ long i = (n) / sizeof (TYPE); \
+ register TYPE *pi = (TYPE *) (parmi); \
+ register TYPE *pj = (TYPE *) (parmj); \
+@@ -41,28 +41,29 @@
+ register TYPE t = *pi; \
+ *pi++ = *pj; \
+ *pj++ = t; \
+- } while (--i > 0); \
+-}
++ } while (--i > 0); \
++} while (0)
+
+ #define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
+- es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
++ es % sizeof(long) ? 2 : es == sizeof(long) ? 0 : 1;
+
+ static __inline void
+ swapfunc(char *a, char *b, int n, int swaptype)
+ {
+ if (swaptype <= 1)
+- swapcode(long, a, b, n)
++ swapcode(long, a, b, n);
+ else
+- swapcode(char, a, b, n)
++ swapcode(char, a, b, n);
+ }
+
+-#define swap(a, b) \
++#define yswap(a, b) do { \
+ if (swaptype == 0) { \
+ long t = *(long *)(a); \
+ *(long *)(a) = *(long *)(b); \
+ *(long *)(b) = t; \
+ } else \
+- swapfunc(a, b, es, swaptype)
++ swapfunc(a, b, es, swaptype); \
++} while (0)
+
+ #define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)
+
+@@ -70,12 +71,12 @@ static __inline char *
+ med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *))
+ {
+ return cmp(a, b) < 0 ?
+- (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a ))
+- :(cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c ));
++ (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a))
++ : (cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c));
+ }
+
+ #ifndef min
+-#define min(a,b) (((a) < (b)) ? (a) : (b))
++#define min(a, b) (((a) < (b)) ? (a) : (b))
+ #endif
+
+ void
+@@ -92,7 +93,7 @@ loop: SWAPINIT(a, es);
+ for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es)
+ for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
+ pl -= es)
+- swap(pl, pl - es);
++ yswap(pl, pl - es);
+ return;
+ }
+ pm = (char *)a + (n / 2) * es;
+@@ -107,7 +108,7 @@ loop: SWAPINIT(a, es);
+ }
+ pm = med3(pl, pm, pn, cmp);
+ }
+- swap(a, pm);
++ yswap(a, pm);
+ pa = pb = (char *)a + es;
+
+ pc = pd = (char *)a + (n - 1) * es;
+@@ -115,7 +116,7 @@ loop: SWAPINIT(a, es);
+ while (pb <= pc && (r = cmp(pb, a)) <= 0) {
+ if (r == 0) {
+ swap_cnt = 1;
+- swap(pa, pb);
++ yswap(pa, pb);
+ pa += es;
+ }
+ pb += es;
+@@ -123,14 +124,14 @@ loop: SWAPINIT(a, es);
+ while (pb <= pc && (r = cmp(pc, a)) >= 0) {
+ if (r == 0) {
+ swap_cnt = 1;
+- swap(pc, pd);
++ yswap(pc, pd);
+ pd -= es;
+ }
+ pc -= es;
+ }
+ if (pb > pc)
+ break;
+- swap(pb, pc);
++ yswap(pb, pc);
+ swap_cnt = 1;
+ pb += es;
+ pc -= es;
+@@ -139,7 +140,7 @@ loop: SWAPINIT(a, es);
+ for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
+ for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
+ pl -= es)
+- swap(pl, pl - es);
++ yswap(pl, pl - es);
+ return;
+ }
+
+@@ -148,9 +149,11 @@ loop: SWAPINIT(a, es);
+ vecswap(a, pb - r, r);
+ r = min((long)(pd - pc), (long)(pn - pd - es));
+ vecswap(pb, pn - r, r);
+- if ((r = pb - pa) > es)
++ r = pb - pa;
++ if (r > es)
+ yaffs_qsort(a, r / es, es, cmp);
+- if ((r = pd - pc) > es) {
++ r = pd - pc;
++ if (r > es) {
+ /* Iterate rather than recurse to save stack space */
+ a = pn - r;
+ n = r / es;
+--- a/fs/yaffs2/yaffs_qsort.h
++++ b/fs/yaffs2/yaffs_qsort.h
+@@ -17,7 +17,7 @@
+ #ifndef __YAFFS_QSORT_H__
+ #define __YAFFS_QSORT_H__
+
+-extern void yaffs_qsort (void *const base, size_t total_elems, size_t size,
+- int (*cmp)(const void *, const void *));
++extern void yaffs_qsort(void *const base, size_t total_elems, size_t size,
++ int (*cmp)(const void *, const void *));
+
+ #endif
+--- a/fs/yaffs2/yaffs_tagscompat.c
++++ b/fs/yaffs2/yaffs_tagscompat.c
+@@ -14,16 +14,17 @@
+ #include "yaffs_guts.h"
+ #include "yaffs_tagscompat.h"
+ #include "yaffs_ecc.h"
++#include "yaffs_getblockinfo.h"
+
+-static void yaffs_HandleReadDataError(yaffs_Device * dev, int chunkInNAND);
++static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND);
+ #ifdef NOTYET
+-static void yaffs_CheckWrittenBlock(yaffs_Device * dev, int chunkInNAND);
+-static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_Spare * spare);
+-static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+- const yaffs_Spare * spare);
+-static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND);
++static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND);
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_Spare *spare);
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++ const yaffs_Spare *spare);
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND);
+ #endif
+
+ static const char yaffs_countBitsTable[256] = {
+@@ -54,13 +55,13 @@ int yaffs_CountBits(__u8 x)
+
+ /********** Tags ECC calculations *********/
+
+-void yaffs_CalcECC(const __u8 * data, yaffs_Spare * spare)
++void yaffs_CalcECC(const __u8 *data, yaffs_Spare *spare)
+ {
+ yaffs_ECCCalculate(data, spare->ecc1);
+ yaffs_ECCCalculate(&data[256], spare->ecc2);
+ }
+
+-void yaffs_CalcTagsECC(yaffs_Tags * tags)
++void yaffs_CalcTagsECC(yaffs_Tags *tags)
+ {
+ /* Calculate an ecc */
+
+@@ -74,9 +75,8 @@ void yaffs_CalcTagsECC(yaffs_Tags * tags
+ for (i = 0; i < 8; i++) {
+ for (j = 1; j & 0xff; j <<= 1) {
+ bit++;
+- if (b[i] & j) {
++ if (b[i] & j)
+ ecc ^= bit;
+- }
+ }
+ }
+
+@@ -84,7 +84,7 @@ void yaffs_CalcTagsECC(yaffs_Tags * tags
+
+ }
+
+-int yaffs_CheckECCOnTags(yaffs_Tags * tags)
++int yaffs_CheckECCOnTags(yaffs_Tags *tags)
+ {
+ unsigned ecc = tags->ecc;
+
+@@ -115,8 +115,8 @@ int yaffs_CheckECCOnTags(yaffs_Tags * ta
+
+ /********** Tags **********/
+
+-static void yaffs_LoadTagsIntoSpare(yaffs_Spare * sparePtr,
+- yaffs_Tags * tagsPtr)
++static void yaffs_LoadTagsIntoSpare(yaffs_Spare *sparePtr,
++ yaffs_Tags *tagsPtr)
+ {
+ yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
+
+@@ -132,8 +132,8 @@ static void yaffs_LoadTagsIntoSpare(yaff
+ sparePtr->tagByte7 = tu->asBytes[7];
+ }
+
+-static void yaffs_GetTagsFromSpare(yaffs_Device * dev, yaffs_Spare * sparePtr,
+- yaffs_Tags * tagsPtr)
++static void yaffs_GetTagsFromSpare(yaffs_Device *dev, yaffs_Spare *sparePtr,
++ yaffs_Tags *tagsPtr)
+ {
+ yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
+ int result;
+@@ -148,21 +148,20 @@ static void yaffs_GetTagsFromSpare(yaffs
+ tu->asBytes[7] = sparePtr->tagByte7;
+
+ result = yaffs_CheckECCOnTags(tagsPtr);
+- if (result > 0) {
++ if (result > 0)
+ dev->tagsEccFixed++;
+- } else if (result < 0) {
++ else if (result < 0)
+ dev->tagsEccUnfixed++;
+- }
+ }
+
+-static void yaffs_SpareInitialise(yaffs_Spare * spare)
++static void yaffs_SpareInitialise(yaffs_Spare *spare)
+ {
+ memset(spare, 0xFF, sizeof(yaffs_Spare));
+ }
+
+ static int yaffs_WriteChunkToNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 * data,
+- yaffs_Spare * spare)
++ int chunkInNAND, const __u8 *data,
++ yaffs_Spare *spare)
+ {
+ if (chunkInNAND < dev->startBlock * dev->nChunksPerBlock) {
+ T(YAFFS_TRACE_ERROR,
+@@ -177,9 +176,9 @@ static int yaffs_WriteChunkToNAND(struct
+
+ static int yaffs_ReadChunkFromNAND(struct yaffs_DeviceStruct *dev,
+ int chunkInNAND,
+- __u8 * data,
+- yaffs_Spare * spare,
+- yaffs_ECCResult * eccResult,
++ __u8 *data,
++ yaffs_Spare *spare,
++ yaffs_ECCResult *eccResult,
+ int doErrorCorrection)
+ {
+ int retVal;
+@@ -252,9 +251,11 @@ static int yaffs_ReadChunkFromNAND(struc
+ /* Must allocate enough memory for spare+2*sizeof(int) */
+ /* for ecc results from device. */
+ struct yaffs_NANDSpare nspare;
+- retVal =
+- dev->readChunkFromNAND(dev, chunkInNAND, data,
+- (yaffs_Spare *) & nspare);
++
++ memset(&nspare, 0, sizeof(nspare));
++
++ retVal = dev->readChunkFromNAND(dev, chunkInNAND, data,
++ (yaffs_Spare *) &nspare);
+ memcpy(spare, &nspare, sizeof(yaffs_Spare));
+ if (data && doErrorCorrection) {
+ if (nspare.eccres1 > 0) {
+@@ -302,8 +303,7 @@ static int yaffs_ReadChunkFromNAND(struc
+ static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+ int chunkInNAND)
+ {
+-
+- static int init = 0;
++ static int init;
+ static __u8 cmpbuf[YAFFS_BYTES_PER_CHUNK];
+ static __u8 data[YAFFS_BYTES_PER_CHUNK];
+ /* Might as well always allocate the larger size for */
+@@ -331,12 +331,12 @@ static int yaffs_CheckChunkErased(struct
+ * Functions for robustisizing
+ */
+
+-static void yaffs_HandleReadDataError(yaffs_Device * dev, int chunkInNAND)
++static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND)
+ {
+ int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+
+ /* Mark the block for retirement */
+- yaffs_GetBlockInfo(dev, blockInNAND)->needsRetiring = 1;
++ yaffs_GetBlockInfo(dev, blockInNAND + dev->blockOffset)->needsRetiring = 1;
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("**>>Block %d marked for retirement" TENDSTR), blockInNAND));
+
+@@ -348,22 +348,22 @@ static void yaffs_HandleReadDataError(ya
+ }
+
+ #ifdef NOTYET
+-static void yaffs_CheckWrittenBlock(yaffs_Device * dev, int chunkInNAND)
++static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND)
+ {
+ }
+
+-static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_Spare * spare)
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_Spare *spare)
+ {
+ }
+
+-static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+- const yaffs_Spare * spare)
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++ const yaffs_Spare *spare)
+ {
+ }
+
+-static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND)
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND)
+ {
+ int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+
+@@ -373,8 +373,8 @@ static void yaffs_HandleWriteChunkError(
+ yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
+ }
+
+-static int yaffs_VerifyCompare(const __u8 * d0, const __u8 * d1,
+- const yaffs_Spare * s0, const yaffs_Spare * s1)
++static int yaffs_VerifyCompare(const __u8 *d0, const __u8 *d1,
++ const yaffs_Spare *s0, const yaffs_Spare *s1)
+ {
+
+ if (memcmp(d0, d1, YAFFS_BYTES_PER_CHUNK) != 0 ||
+@@ -398,28 +398,35 @@ static int yaffs_VerifyCompare(const __u
+ }
+ #endif /* NOTYET */
+
+-int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device * dev,
+- int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags *
+- eTags)
++int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev,
++ int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *eTags)
+ {
+ yaffs_Spare spare;
+ yaffs_Tags tags;
+
+ yaffs_SpareInitialise(&spare);
+
+- if (eTags->chunkDeleted) {
++ if (eTags->chunkDeleted)
+ spare.pageStatus = 0;
+- } else {
++ else {
+ tags.objectId = eTags->objectId;
+ tags.chunkId = eTags->chunkId;
+- tags.byteCount = eTags->byteCount;
++
++ tags.byteCountLSB = eTags->byteCount & 0x3ff;
++
++ if (dev->nDataBytesPerChunk >= 1024)
++ tags.byteCountMSB = (eTags->byteCount >> 10) & 3;
++ else
++ tags.byteCountMSB = 3;
++
++
+ tags.serialNumber = eTags->serialNumber;
+
+- if (!dev->useNANDECC && data) {
++ if (!dev->useNANDECC && data)
+ yaffs_CalcECC(data, &spare);
+- }
++
+ yaffs_LoadTagsIntoSpare(&spare, &tags);
+
+ }
+@@ -427,15 +434,15 @@ int yaffs_TagsCompatabilityWriteChunkWit
+ return yaffs_WriteChunkToNAND(dev, chunkInNAND, data, &spare);
+ }
+
+-int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device * dev,
++int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev,
+ int chunkInNAND,
+- __u8 * data,
+- yaffs_ExtendedTags * eTags)
++ __u8 *data,
++ yaffs_ExtendedTags *eTags)
+ {
+
+ yaffs_Spare spare;
+ yaffs_Tags tags;
+- yaffs_ECCResult eccResult;
++ yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_UNKNOWN;
+
+ static yaffs_Spare spareFF;
+ static int init;
+@@ -466,7 +473,11 @@ int yaffs_TagsCompatabilityReadChunkWith
+
+ eTags->objectId = tags.objectId;
+ eTags->chunkId = tags.chunkId;
+- eTags->byteCount = tags.byteCount;
++ eTags->byteCount = tags.byteCountLSB;
++
++ if (dev->nDataBytesPerChunk >= 1024)
++ eTags->byteCount |= (((unsigned) tags.byteCountMSB) << 10);
++
+ eTags->serialNumber = tags.serialNumber;
+ }
+ }
+@@ -497,9 +508,9 @@ int yaffs_TagsCompatabilityMarkNANDBlock
+ }
+
+ int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
+- int blockNo, yaffs_BlockState *
+- state,
+- int *sequenceNumber)
++ int blockNo,
++ yaffs_BlockState *state,
++ __u32 *sequenceNumber)
+ {
+
+ yaffs_Spare spare0, spare1;
+--- a/fs/yaffs2/yaffs_tagscompat.h
++++ b/fs/yaffs2/yaffs_tagscompat.h
+@@ -17,24 +17,23 @@
+ #define __YAFFS_TAGSCOMPAT_H__
+
+ #include "yaffs_guts.h"
+-int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device * dev,
+- int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags *
+- tags);
+-int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device * dev,
+- int chunkInNAND,
+- __u8 * data,
+- yaffs_ExtendedTags *
+- tags);
++int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev,
++ int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *tags);
++int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev,
++ int chunkInNAND,
++ __u8 *data,
++ yaffs_ExtendedTags *tags);
+ int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
+ int blockNo);
+ int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
+- int blockNo, yaffs_BlockState *
+- state, int *sequenceNumber);
++ int blockNo,
++ yaffs_BlockState *state,
++ __u32 *sequenceNumber);
+
+-void yaffs_CalcTagsECC(yaffs_Tags * tags);
+-int yaffs_CheckECCOnTags(yaffs_Tags * tags);
++void yaffs_CalcTagsECC(yaffs_Tags *tags);
++int yaffs_CheckECCOnTags(yaffs_Tags *tags);
+ int yaffs_CountBits(__u8 byte);
+
+ #endif
+--- a/fs/yaffs2/yaffs_tagsvalidity.c
++++ b/fs/yaffs2/yaffs_tagsvalidity.c
+@@ -13,14 +13,14 @@
+
+ #include "yaffs_tagsvalidity.h"
+
+-void yaffs_InitialiseTags(yaffs_ExtendedTags * tags)
++void yaffs_InitialiseTags(yaffs_ExtendedTags *tags)
+ {
+ memset(tags, 0, sizeof(yaffs_ExtendedTags));
+ tags->validMarker0 = 0xAAAAAAAA;
+ tags->validMarker1 = 0x55555555;
+ }
+
+-int yaffs_ValidateTags(yaffs_ExtendedTags * tags)
++int yaffs_ValidateTags(yaffs_ExtendedTags *tags)
+ {
+ return (tags->validMarker0 == 0xAAAAAAAA &&
+ tags->validMarker1 == 0x55555555);
+--- a/fs/yaffs2/yaffs_tagsvalidity.h
++++ b/fs/yaffs2/yaffs_tagsvalidity.h
+@@ -19,6 +19,6 @@
+
+ #include "yaffs_guts.h"
+
+-void yaffs_InitialiseTags(yaffs_ExtendedTags * tags);
+-int yaffs_ValidateTags(yaffs_ExtendedTags * tags);
++void yaffs_InitialiseTags(yaffs_ExtendedTags *tags);
++int yaffs_ValidateTags(yaffs_ExtendedTags *tags);
+ #endif
+--- a/fs/yaffs2/yportenv.h
++++ b/fs/yaffs2/yportenv.h
+@@ -17,17 +17,28 @@
+ #ifndef __YPORTENV_H__
+ #define __YPORTENV_H__
+
++/*
++ * Define the MTD version in terms of Linux Kernel versions
++ * This allows yaffs to be used independantly of the kernel
++ * as well as with it.
++ */
++
++#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
++
+ #if defined CONFIG_YAFFS_WINCE
+
+ #include "ywinceenv.h"
+
+-#elif defined __KERNEL__
++#elif defined __KERNEL__
+
+ #include "moduleconfig.h"
+
+ /* Linux kernel */
++
+ #include <linux/version.h>
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++#define MTD_VERSION_CODE LINUX_VERSION_CODE
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+ #include <linux/config.h>
+ #endif
+ #include <linux/kernel.h>
+@@ -40,12 +51,13 @@
+ #define YCHAR char
+ #define YUCHAR unsigned char
+ #define _Y(x) x
+-#define yaffs_strcpy(a,b) strcpy(a,b)
+-#define yaffs_strncpy(a,b,c) strncpy(a,b,c)
+-#define yaffs_strncmp(a,b,c) strncmp(a,b,c)
+-#define yaffs_strlen(s) strlen(s)
+-#define yaffs_sprintf sprintf
+-#define yaffs_toupper(a) toupper(a)
++#define yaffs_strcat(a, b) strcat(a, b)
++#define yaffs_strcpy(a, b) strcpy(a, b)
++#define yaffs_strncpy(a, b, c) strncpy(a, b, c)
++#define yaffs_strncmp(a, b, c) strncmp(a, b, c)
++#define yaffs_strlen(s) strlen(s)
++#define yaffs_sprintf sprintf
++#define yaffs_toupper(a) toupper(a)
+
+ #define Y_INLINE inline
+
+@@ -53,19 +65,19 @@
+ #define YAFFS_LOSTNFOUND_PREFIX "obj"
+
+ /* #define YPRINTF(x) printk x */
+-#define YMALLOC(x) kmalloc(x,GFP_KERNEL)
++#define YMALLOC(x) kmalloc(x, GFP_NOFS)
+ #define YFREE(x) kfree(x)
+ #define YMALLOC_ALT(x) vmalloc(x)
+ #define YFREE_ALT(x) vfree(x)
+ #define YMALLOC_DMA(x) YMALLOC(x)
+
+-// KR - added for use in scan so processes aren't blocked indefinitely.
++/* KR - added for use in scan so processes aren't blocked indefinitely. */
+ #define YYIELD() schedule()
+
+ #define YAFFS_ROOT_MODE 0666
+ #define YAFFS_LOSTNFOUND_MODE 0666
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ #define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+ #define Y_TIME_CONVERT(x) (x).tv_sec
+ #else
+@@ -73,11 +85,12 @@
+ #define Y_TIME_CONVERT(x) (x)
+ #endif
+
+-#define yaffs_SumCompare(x,y) ((x) == (y))
+-#define yaffs_strcmp(a,b) strcmp(a,b)
++#define yaffs_SumCompare(x, y) ((x) == (y))
++#define yaffs_strcmp(a, b) strcmp(a, b)
+
+ #define TENDSTR "\n"
+ #define TSTR(x) KERN_WARNING x
++#define TCONT(x) x
+ #define TOUT(p) printk p
+
+ #define yaffs_trace(mask, fmt, args...) \
+@@ -90,6 +103,8 @@
+
+ #elif defined CONFIG_YAFFS_DIRECT
+
++#define MTD_VERSION_CODE MTD_VERSION(2, 6, 22)
++
+ /* Direct interface */
+ #include "ydirectenv.h"
+
+@@ -111,11 +126,12 @@
+ #define YCHAR char
+ #define YUCHAR unsigned char
+ #define _Y(x) x
+-#define yaffs_strcpy(a,b) strcpy(a,b)
+-#define yaffs_strncpy(a,b,c) strncpy(a,b,c)
+-#define yaffs_strlen(s) strlen(s)
+-#define yaffs_sprintf sprintf
+-#define yaffs_toupper(a) toupper(a)
++#define yaffs_strcat(a, b) strcat(a, b)
++#define yaffs_strcpy(a, b) strcpy(a, b)
++#define yaffs_strncpy(a, b, c) strncpy(a, b, c)
++#define yaffs_strlen(s) strlen(s)
++#define yaffs_sprintf sprintf
++#define yaffs_toupper(a) toupper(a)
+
+ #define Y_INLINE inline
+
+@@ -133,8 +149,8 @@
+ #define YAFFS_ROOT_MODE 0666
+ #define YAFFS_LOSTNFOUND_MODE 0666
+
+-#define yaffs_SumCompare(x,y) ((x) == (y))
+-#define yaffs_strcmp(a,b) strcmp(a,b)
++#define yaffs_SumCompare(x, y) ((x) == (y))
++#define yaffs_strcmp(a, b) strcmp(a, b)
+
+ #else
+ /* Should have specified a configuration type */
+@@ -178,10 +194,10 @@ extern unsigned int yaffs_wr_attempts;
+ #define YAFFS_TRACE_ALWAYS 0xF0000000
+
+
+-#define T(mask,p) do{ if((mask) & (yaffs_traceMask | YAFFS_TRACE_ALWAYS)) TOUT(p);} while(0)
++#define T(mask, p) do { if ((mask) & (yaffs_traceMask | YAFFS_TRACE_ALWAYS)) TOUT(p); } while (0)
+
+-#ifndef CONFIG_YAFFS_WINCE
+-#define YBUG() T(YAFFS_TRACE_BUG,(TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR),__LINE__))
++#ifndef YBUG
++#define YBUG() do {T(YAFFS_TRACE_BUG, (TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR), __LINE__)); } while (0)
+ #endif
+
+ #endif
diff --git a/target/linux/generic/patches-3.3/502-yaffs_git_2010_10_20.patch b/target/linux/generic/patches-3.3/502-yaffs_git_2010_10_20.patch
new file mode 100644
index 0000000..f4535a6
--- /dev/null
+++ b/target/linux/generic/patches-3.3/502-yaffs_git_2010_10_20.patch
@@ -0,0 +1,27068 @@
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -35,7 +35,6 @@ source "fs/gfs2/Kconfig"
+ source "fs/ocfs2/Kconfig"
+ source "fs/btrfs/Kconfig"
+ source "fs/nilfs2/Kconfig"
+-source "fs/yaffs2/Kconfig"
+
+ endif # BLOCK
+
+@@ -201,6 +200,10 @@ source "fs/hfsplus/Kconfig"
+ source "fs/befs/Kconfig"
+ source "fs/bfs/Kconfig"
+ source "fs/efs/Kconfig"
++
++# Patched by YAFFS
++source "fs/yaffs2/Kconfig"
++
+ source "fs/jffs2/Kconfig"
+ # UBIFS File system configuration
+ source "fs/ubifs/Kconfig"
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -125,5 +125,6 @@ obj-$(CONFIG_GFS2_FS) += gfs2/
+ obj-y += exofs/ # Multiple modules
+ obj-$(CONFIG_CEPH_FS) += ceph/
+ obj-$(CONFIG_PSTORE) += pstore/
+-obj-$(CONFIG_YAFFS_FS) += yaffs2/
+
++# Patched by YAFFS
++obj-$(CONFIG_YAFFS_FS) += yaffs2/
+--- a/fs/yaffs2/devextras.h
++++ b/fs/yaffs2/devextras.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -24,6 +24,8 @@
+ #define __EXTRAS_H__
+
+
++#include "yportenv.h"
++
+ #if !(defined __KERNEL__)
+
+ /* Definition of types */
+@@ -33,103 +35,6 @@ typedef unsigned __u32;
+
+ #endif
+
+-/*
+- * This is a simple doubly linked list implementation that matches the
+- * way the Linux kernel doubly linked list implementation works.
+- */
+-
+-struct ylist_head {
+- struct ylist_head *next; /* next in chain */
+- struct ylist_head *prev; /* previous in chain */
+-};
+-
+-
+-/* Initialise a static list */
+-#define YLIST_HEAD(name) \
+-struct ylist_head name = { &(name), &(name)}
+-
+-
+-
+-/* Initialise a list head to an empty list */
+-#define YINIT_LIST_HEAD(p) \
+-do { \
+- (p)->next = (p);\
+- (p)->prev = (p); \
+-} while (0)
+-
+-
+-/* Add an element to a list */
+-static __inline__ void ylist_add(struct ylist_head *newEntry,
+- struct ylist_head *list)
+-{
+- struct ylist_head *listNext = list->next;
+-
+- list->next = newEntry;
+- newEntry->prev = list;
+- newEntry->next = listNext;
+- listNext->prev = newEntry;
+-
+-}
+-
+-static __inline__ void ylist_add_tail(struct ylist_head *newEntry,
+- struct ylist_head *list)
+-{
+- struct ylist_head *listPrev = list->prev;
+-
+- list->prev = newEntry;
+- newEntry->next = list;
+- newEntry->prev = listPrev;
+- listPrev->next = newEntry;
+-
+-}
+-
+-
+-/* Take an element out of its current list, with or without
+- * reinitialising the links.of the entry*/
+-static __inline__ void ylist_del(struct ylist_head *entry)
+-{
+- struct ylist_head *listNext = entry->next;
+- struct ylist_head *listPrev = entry->prev;
+-
+- listNext->prev = listPrev;
+- listPrev->next = listNext;
+-
+-}
+-
+-static __inline__ void ylist_del_init(struct ylist_head *entry)
+-{
+- ylist_del(entry);
+- entry->next = entry->prev = entry;
+-}
+-
+-
+-/* Test if the list is empty */
+-static __inline__ int ylist_empty(struct ylist_head *entry)
+-{
+- return (entry->next == entry);
+-}
+-
+-
+-/* ylist_entry takes a pointer to a list entry and offsets it to that
+- * we can find a pointer to the object it is embedded in.
+- */
+-
+-
+-#define ylist_entry(entry, type, member) \
+- ((type *)((char *)(entry)-(unsigned long)(&((type *)NULL)->member)))
+-
+-
+-/* ylist_for_each and list_for_each_safe iterate over lists.
+- * ylist_for_each_safe uses temporary storage to make the list delete safe
+- */
+-
+-#define ylist_for_each(itervar, list) \
+- for (itervar = (list)->next; itervar != (list); itervar = itervar->next)
+-
+-#define ylist_for_each_safe(itervar, saveVar, list) \
+- for (itervar = (list)->next, saveVar = (list)->next->next; \
+- itervar != (list); itervar = saveVar, saveVar = saveVar->next)
+-
+
+ #if !(defined __KERNEL__)
+
+--- a/fs/yaffs2/Kconfig
++++ b/fs/yaffs2/Kconfig
+@@ -90,23 +90,15 @@ config YAFFS_AUTO_YAFFS2
+
+ If unsure, say Y.
+
+-config YAFFS_DISABLE_LAZY_LOAD
+- bool "Disable lazy loading"
+- depends on YAFFS_YAFFS2
++config YAFFS_DISABLE_TAGS_ECC
++ bool "Disable YAFFS from doing ECC on tags by default"
++ depends on YAFFS_FS && YAFFS_YAFFS2
+ default n
+ help
+- "Lazy loading" defers loading file details until they are
+- required. This saves mount time, but makes the first look-up
+- a bit longer.
+-
+- Lazy loading will only happen if enabled by this option being 'n'
+- and if the appropriate tags are available, else yaffs2 will
+- automatically fall back to immediate loading and do the right
+- thing.
+-
+- Lazy laoding will be required by checkpointing.
+-
+- Setting this to 'y' will disable lazy loading.
++ This defaults Yaffs to using its own ECC calculations on tags instead of
++ just relying on the MTD.
++ This behavior can also be overridden with tags_ecc_on and
++ tags_ecc_off mount options.
+
+ If unsure, say N.
+
+@@ -154,3 +146,45 @@ config YAFFS_SHORT_NAMES_IN_RAM
+ but makes look-ups faster.
+
+ If unsure, say Y.
++
++config YAFFS_EMPTY_LOST_AND_FOUND
++ bool "Empty lost and found on boot"
++ depends on YAFFS_FS
++ default n
++ help
++ If this is enabled then the contents of lost and found is
++ automatically dumped at mount.
++
++ If unsure, say N.
++
++config YAFFS_DISABLE_BLOCK_REFRESHING
++ bool "Disable yaffs2 block refreshing"
++ depends on YAFFS_FS
++ default n
++ help
++ If this is set, then block refreshing is disabled.
++ Block refreshing infrequently refreshes the oldest block in
++ a yaffs2 file system. This mechanism helps to refresh flash to
++ mitigate against data loss. This is particularly useful for MLC.
++
++ If unsure, say N.
++
++config YAFFS_DISABLE_BACKGROUND
++ bool "Disable yaffs2 background processing"
++ depends on YAFFS_FS
++ default n
++ help
++ If this is set, then background processing is disabled.
++ Background processing makes many foreground activities faster.
++
++ If unsure, say N.
++
++config YAFFS_XATTR
++ bool "Enable yaffs2 xattr support"
++ depends on YAFFS_FS
++ default y
++ help
++ If this is set then yaffs2 will provide xattr support.
++ If unsure, say Y.
++
++
+--- a/fs/yaffs2/Makefile
++++ b/fs/yaffs2/Makefile
+@@ -4,7 +4,14 @@
+
+ obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+-yaffs-y := yaffs_ecc.o yaffs_fs.o yaffs_guts.o yaffs_checkptrw.o
+-yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o yaffs_qsort.o
++yaffs-y := yaffs_ecc.o yaffs_vfs_glue.o yaffs_guts.o yaffs_checkptrw.o
++yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
+ yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o
+ yaffs-y += yaffs_mtdif.o yaffs_mtdif1.o yaffs_mtdif2.o
++yaffs-y += yaffs_nameval.o
++yaffs-y += yaffs_allocator.o
++yaffs-y += yaffs_yaffs1.o
++yaffs-y += yaffs_yaffs2.o
++yaffs-y += yaffs_bitmap.o
++yaffs-y += yaffs_verify.o
++
+--- a/fs/yaffs2/moduleconfig.h
++++ b/fs/yaffs2/moduleconfig.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Martin Fouts <Martin.Fouts@palmsource.com>
+@@ -29,25 +29,46 @@
+ /* Meaning: Yaffs does its own ECC, rather than using MTD ECC */
+ /* #define CONFIG_YAFFS_DOES_ECC */
+
++/* Default: Selected */
++/* Meaning: Yaffs does its own ECC on tags for packed tags rather than use mtd */
++#define CONFIG_YAFFS_DOES_TAGS_ECC
++
+ /* Default: Not selected */
+ /* Meaning: ECC byte order is 'wrong'. Only meaningful if */
+ /* CONFIG_YAFFS_DOES_ECC is set */
+ /* #define CONFIG_YAFFS_ECC_WRONG_ORDER */
+
+-/* Default: Selected */
+-/* Meaning: Disables testing whether chunks are erased before writing to them*/
+-#define CONFIG_YAFFS_DISABLE_CHUNK_ERASED_CHECK
++/* Default: Not selected */
++/* Meaning: Always test whether chunks are erased before writing to them.
++ Use during mtd debugging and init. */
++/* #define CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED */
++
++/* Default: Not Selected */
++/* Meaning: At mount automatically empty all files from lost and found. */
++/* This is done to fix an old problem where rmdir was not checking for an */
++/* empty directory. This can also be achieved with a mount option. */
++#define CONFIG_YAFFS_EMPTY_LOST_AND_FOUND
+
+ /* Default: Selected */
+ /* Meaning: Cache short names, taking more RAM, but faster look-ups */
+ #define CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+
+-/* Default: 10 */
+-/* Meaning: set the count of blocks to reserve for checkpointing */
+-#define CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS 10
++/* Default: Unselected */
++/* Meaning: Select to disable block refreshing. */
++/* Block Refreshing periodically rewrites the oldest block. */
++/* #define CONFIG_DISABLE_BLOCK_REFRESHING */
++
++/* Default: Unselected */
++/* Meaning: Select to disable background processing */
++/* #define CONFIG_DISABLE_BACKGROUND */
++
++
++/* Default: Selected */
++/* Meaning: Enable XATTR support */
++#define CONFIG_YAFFS_XATTR
+
+ /*
+-Older-style on-NAND data format has a "pageStatus" byte to record
++Older-style on-NAND data format has a "page_status" byte to record
+ chunk/page state. This byte is zeroed when the page is discarded.
+ Choose this option if you have existing on-NAND data in this format
+ that you need to continue to support. New data written also uses the
+@@ -57,7 +78,7 @@ adjusted to use the older-style format.
+ MTD versions in yaffs_mtdif1.c.
+ */
+ /* Default: Not selected */
+-/* Meaning: Use older-style on-NAND data format with pageStatus byte */
++/* Meaning: Use older-style on-NAND data format with page_status byte */
+ /* #define CONFIG_YAFFS_9BYTE_TAGS */
+
+ #endif /* YAFFS_OUT_OF_TREE */
+--- /dev/null
++++ b/fs/yaffs2/yaffs_allocator.c
+@@ -0,0 +1,409 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++
++#include "yaffs_allocator.h"
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yportenv.h"
++
++#ifdef CONFIG_YAFFS_YMALLOC_ALLOCATOR
++
++void yaffs_deinit_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ dev = dev;
++}
++
++void yaffs_init_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ dev = dev;
++}
++
++yaffs_tnode_t *yaffs_alloc_raw_tnode(yaffs_dev_t *dev)
++{
++ return (yaffs_tnode_t *)YMALLOC(dev->tnode_size);
++}
++
++void yaffs_free_raw_tnode(yaffs_dev_t *dev, yaffs_tnode_t *tn)
++{
++ dev = dev;
++ YFREE(tn);
++}
++
++void yaffs_init_raw_objs(yaffs_dev_t *dev)
++{
++ dev = dev;
++}
++
++void yaffs_deinit_raw_objs(yaffs_dev_t *dev)
++{
++ dev = dev;
++}
++
++yaffs_obj_t *yaffs_alloc_raw_obj(yaffs_dev_t *dev)
++{
++ dev = dev;
++ return (yaffs_obj_t *) YMALLOC(sizeof(yaffs_obj_t));
++}
++
++
++void yaffs_free_raw_obj(yaffs_dev_t *dev, yaffs_obj_t *obj)
++{
++
++ dev = dev;
++ YFREE(obj);
++}
++
++#else
++
++struct yaffs_tnode_list {
++ struct yaffs_tnode_list *next;
++ yaffs_tnode_t *tnodes;
++};
++
++typedef struct yaffs_tnode_list yaffs_tnodelist_t;
++
++struct yaffs_obj_tList_struct {
++ yaffs_obj_t *objects;
++ struct yaffs_obj_tList_struct *next;
++};
++
++typedef struct yaffs_obj_tList_struct yaffs_obj_tList;
++
++
++struct yaffs_AllocatorStruct {
++ int n_tnodesCreated;
++ yaffs_tnode_t *freeTnodes;
++ int nFreeTnodes;
++ yaffs_tnodelist_t *allocatedTnodeList;
++
++ int n_objCreated;
++ yaffs_obj_t *freeObjects;
++ int nFreeObjects;
++
++ yaffs_obj_tList *allocatedObjectList;
++};
++
++typedef struct yaffs_AllocatorStruct yaffs_Allocator;
++
++
++static void yaffs_deinit_raw_tnodes(yaffs_dev_t *dev)
++{
++
++ yaffs_Allocator *allocator = (yaffs_Allocator *)dev->allocator;
++
++ yaffs_tnodelist_t *tmp;
++
++ if(!allocator){
++ YBUG();
++ return;
++ }
++
++ while (allocator->allocatedTnodeList) {
++ tmp = allocator->allocatedTnodeList->next;
++
++ YFREE(allocator->allocatedTnodeList->tnodes);
++ YFREE(allocator->allocatedTnodeList);
++ allocator->allocatedTnodeList = tmp;
++
++ }
++
++ allocator->freeTnodes = NULL;
++ allocator->nFreeTnodes = 0;
++ allocator->n_tnodesCreated = 0;
++}
++
++static void yaffs_init_raw_tnodes(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++
++ if(allocator){
++ allocator->allocatedTnodeList = NULL;
++ allocator->freeTnodes = NULL;
++ allocator->nFreeTnodes = 0;
++ allocator->n_tnodesCreated = 0;
++ } else
++ YBUG();
++}
++
++static int yaffs_create_tnodes(yaffs_dev_t *dev, int n_tnodes)
++{
++ yaffs_Allocator *allocator = (yaffs_Allocator *)dev->allocator;
++ int i;
++ yaffs_tnode_t *newTnodes;
++ __u8 *mem;
++ yaffs_tnode_t *curr;
++ yaffs_tnode_t *next;
++ yaffs_tnodelist_t *tnl;
++
++ if(!allocator){
++ YBUG();
++ return YAFFS_FAIL;
++ }
++
++ if (n_tnodes < 1)
++ return YAFFS_OK;
++
++
++ /* make these things */
++
++ newTnodes = YMALLOC(n_tnodes * dev->tnode_size);
++ mem = (__u8 *)newTnodes;
++
++ if (!newTnodes) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("yaffs: Could not allocate Tnodes" TENDSTR)));
++ return YAFFS_FAIL;
++ }
++
++ /* New hookup for wide tnodes */
++ for (i = 0; i < n_tnodes - 1; i++) {
++ curr = (yaffs_tnode_t *) &mem[i * dev->tnode_size];
++ next = (yaffs_tnode_t *) &mem[(i+1) * dev->tnode_size];
++ curr->internal[0] = next;
++ }
++
++ curr = (yaffs_tnode_t *) &mem[(n_tnodes - 1) * dev->tnode_size];
++ curr->internal[0] = allocator->freeTnodes;
++ allocator->freeTnodes = (yaffs_tnode_t *)mem;
++
++ allocator->nFreeTnodes += n_tnodes;
++ allocator->n_tnodesCreated += n_tnodes;
++
++ /* Now add this bunch of tnodes to a list for freeing up.
++ * NB If we can't add this to the management list it isn't fatal
++ * but it just means we can't free this bunch of tnodes later.
++ */
++
++ tnl = YMALLOC(sizeof(yaffs_tnodelist_t));
++ if (!tnl) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("yaffs: Could not add tnodes to management list" TENDSTR)));
++ return YAFFS_FAIL;
++ } else {
++ tnl->tnodes = newTnodes;
++ tnl->next = allocator->allocatedTnodeList;
++ allocator->allocatedTnodeList = tnl;
++ }
++
++ T(YAFFS_TRACE_ALLOCATE, (TSTR("yaffs: Tnodes added" TENDSTR)));
++
++ return YAFFS_OK;
++}
++
++
++yaffs_tnode_t *yaffs_alloc_raw_tnode(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = (yaffs_Allocator *)dev->allocator;
++ yaffs_tnode_t *tn = NULL;
++
++ if(!allocator){
++ YBUG();
++ return NULL;
++ }
++
++ /* If there are none left make more */
++ if (!allocator->freeTnodes)
++ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
++
++ if (allocator->freeTnodes) {
++ tn = allocator->freeTnodes;
++ allocator->freeTnodes = allocator->freeTnodes->internal[0];
++ allocator->nFreeTnodes--;
++ }
++
++ return tn;
++}
++
++/* FreeTnode frees up a tnode and puts it back on the free list */
++void yaffs_free_raw_tnode(yaffs_dev_t *dev, yaffs_tnode_t *tn)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++
++ if(!allocator){
++ YBUG();
++ return;
++ }
++
++ if (tn) {
++ tn->internal[0] = allocator->freeTnodes;
++ allocator->freeTnodes = tn;
++ allocator->nFreeTnodes++;
++ }
++ dev->checkpoint_blocks_required = 0; /* force recalculation*/
++}
++
++
++
++static void yaffs_init_raw_objs(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++
++ if(allocator) {
++ allocator->allocatedObjectList = NULL;
++ allocator->freeObjects = NULL;
++ allocator->nFreeObjects = 0;
++ } else
++ YBUG();
++}
++
++static void yaffs_deinit_raw_objs(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++ yaffs_obj_tList *tmp;
++
++ if(!allocator){
++ YBUG();
++ return;
++ }
++
++ while (allocator->allocatedObjectList) {
++ tmp = allocator->allocatedObjectList->next;
++ YFREE(allocator->allocatedObjectList->objects);
++ YFREE(allocator->allocatedObjectList);
++
++ allocator->allocatedObjectList = tmp;
++ }
++
++ allocator->freeObjects = NULL;
++ allocator->nFreeObjects = 0;
++ allocator->n_objCreated = 0;
++}
++
++
++static int yaffs_create_free_objs(yaffs_dev_t *dev, int n_obj)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++
++ int i;
++ yaffs_obj_t *newObjects;
++ yaffs_obj_tList *list;
++
++ if(!allocator){
++ YBUG();
++ return YAFFS_FAIL;
++ }
++
++ if (n_obj < 1)
++ return YAFFS_OK;
++
++ /* make these things */
++ newObjects = YMALLOC(n_obj * sizeof(yaffs_obj_t));
++ list = YMALLOC(sizeof(yaffs_obj_tList));
++
++ if (!newObjects || !list) {
++ if (newObjects){
++ YFREE(newObjects);
++ newObjects = NULL;
++ }
++ if (list){
++ YFREE(list);
++ list = NULL;
++ }
++ T(YAFFS_TRACE_ALLOCATE,
++ (TSTR("yaffs: Could not allocate more objects" TENDSTR)));
++ return YAFFS_FAIL;
++ }
++
++ /* Hook them into the free list */
++ for (i = 0; i < n_obj - 1; i++) {
++ newObjects[i].siblings.next =
++ (struct ylist_head *)(&newObjects[i + 1]);
++ }
++
++ newObjects[n_obj - 1].siblings.next = (void *)allocator->freeObjects;
++ allocator->freeObjects = newObjects;
++ allocator->nFreeObjects += n_obj;
++ allocator->n_objCreated += n_obj;
++
++ /* Now add this bunch of Objects to a list for freeing up. */
++
++ list->objects = newObjects;
++ list->next = allocator->allocatedObjectList;
++ allocator->allocatedObjectList = list;
++
++ return YAFFS_OK;
++}
++
++yaffs_obj_t *yaffs_alloc_raw_obj(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj = NULL;
++ yaffs_Allocator *allocator = dev->allocator;
++
++ if(!allocator) {
++ YBUG();
++ return obj;
++ }
++
++ /* If there are none left make more */
++ if (!allocator->freeObjects)
++ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
++
++ if (allocator->freeObjects) {
++ obj = allocator->freeObjects;
++ allocator->freeObjects =
++ (yaffs_obj_t *) (allocator->freeObjects->siblings.next);
++ allocator->nFreeObjects--;
++ }
++
++ return obj;
++}
++
++
++void yaffs_free_raw_obj(yaffs_dev_t *dev, yaffs_obj_t *obj)
++{
++
++ yaffs_Allocator *allocator = dev->allocator;
++
++ if(!allocator)
++ YBUG();
++ else {
++ /* Link into the free list. */
++ obj->siblings.next = (struct ylist_head *)(allocator->freeObjects);
++ allocator->freeObjects = obj;
++ allocator->nFreeObjects++;
++ }
++}
++
++void yaffs_deinit_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ if(dev->allocator){
++ yaffs_deinit_raw_tnodes(dev);
++ yaffs_deinit_raw_objs(dev);
++
++ YFREE(dev->allocator);
++ dev->allocator=NULL;
++ } else
++ YBUG();
++}
++
++void yaffs_init_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator;
++
++ if(!dev->allocator){
++ allocator = YMALLOC(sizeof(yaffs_Allocator));
++ if(allocator){
++ dev->allocator = allocator;
++ yaffs_init_raw_tnodes(dev);
++ yaffs_init_raw_objs(dev);
++ }
++ } else
++ YBUG();
++}
++
++
++#endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_allocator.h
+@@ -0,0 +1,30 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_ALLOCATOR_H__
++#define __YAFFS_ALLOCATOR_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_init_raw_tnodes_and_objs(yaffs_dev_t *dev);
++void yaffs_deinit_raw_tnodes_and_objs(yaffs_dev_t *dev);
++
++yaffs_tnode_t *yaffs_alloc_raw_tnode(yaffs_dev_t *dev);
++void yaffs_free_raw_tnode(yaffs_dev_t *dev, yaffs_tnode_t *tn);
++
++yaffs_obj_t *yaffs_alloc_raw_obj(yaffs_dev_t *dev);
++void yaffs_free_raw_obj(yaffs_dev_t *dev, yaffs_obj_t *obj);
++
++#endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_bitmap.c
+@@ -0,0 +1,105 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_bitmap.h"
++#include "yaffs_trace.h"
++/*
++ * Chunk bitmap manipulations
++ */
++
++static Y_INLINE __u8 *yaffs_BlockBits(yaffs_dev_t *dev, int blk)
++{
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR),
++ blk));
++ YBUG();
++ }
++ return dev->chunk_bits +
++ (dev->chunk_bit_stride * (blk - dev->internal_start_block));
++}
++
++void yaffs_verify_chunk_bit_id(yaffs_dev_t *dev, int blk, int chunk)
++{
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
++ chunk < 0 || chunk >= dev->param.chunks_per_block) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),
++ blk, chunk));
++ YBUG();
++ }
++}
++
++void yaffs_clear_chunk_bits(yaffs_dev_t *dev, int blk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++
++ memset(blkBits, 0, dev->chunk_bit_stride);
++}
++
++void yaffs_clear_chunk_bit(yaffs_dev_t *dev, int blk, int chunk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++
++ blkBits[chunk / 8] &= ~(1 << (chunk & 7));
++}
++
++void yaffs_set_chunk_bit(yaffs_dev_t *dev, int blk, int chunk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++
++ blkBits[chunk / 8] |= (1 << (chunk & 7));
++}
++
++int yaffs_check_chunk_bit(yaffs_dev_t *dev, int blk, int chunk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++
++ return (blkBits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
++}
++
++int yaffs_still_some_chunks(yaffs_dev_t *dev, int blk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++ int i;
++ for (i = 0; i < dev->chunk_bit_stride; i++) {
++ if (*blkBits)
++ return 1;
++ blkBits++;
++ }
++ return 0;
++}
++
++int yaffs_count_chunk_bits(yaffs_dev_t *dev, int blk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++ int i;
++ int n = 0;
++ for (i = 0; i < dev->chunk_bit_stride; i++) {
++ __u8 x = *blkBits;
++ while (x) {
++ if (x & 1)
++ n++;
++ x >>= 1;
++ }
++
++ blkBits++;
++ }
++ return n;
++}
++
+--- /dev/null
++++ b/fs/yaffs2/yaffs_bitmap.h
+@@ -0,0 +1,31 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * Chunk bitmap manipulations
++ */
++
++#ifndef __YAFFS_BITMAP_H__
++#define __YAFFS_BITMAP_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_verify_chunk_bit_id(yaffs_dev_t *dev, int blk, int chunk);
++void yaffs_clear_chunk_bits(yaffs_dev_t *dev, int blk);
++void yaffs_clear_chunk_bit(yaffs_dev_t *dev, int blk, int chunk);
++void yaffs_set_chunk_bit(yaffs_dev_t *dev, int blk, int chunk);
++int yaffs_check_chunk_bit(yaffs_dev_t *dev, int blk, int chunk);
++int yaffs_still_some_chunks(yaffs_dev_t *dev, int blk);
++int yaffs_count_chunk_bits(yaffs_dev_t *dev, int blk);
++
++#endif
+--- a/fs/yaffs2/yaffs_checkptrw.c
++++ b/fs/yaffs2/yaffs_checkptrw.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -11,16 +11,12 @@
+ * published by the Free Software Foundation.
+ */
+
+-const char *yaffs_checkptrw_c_version =
+- "$Id: yaffs_checkptrw.c,v 1.18 2009-03-06 17:20:49 wookey Exp $";
+-
+-
+ #include "yaffs_checkptrw.h"
+ #include "yaffs_getblockinfo.h"
+
+-static int yaffs_CheckpointSpaceOk(yaffs_Device *dev)
++static int yaffs2_checkpt_space_ok(yaffs_dev_t *dev)
+ {
+- int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
++ int blocksAvailable = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ T(YAFFS_TRACE_CHECKPOINT,
+ (TSTR("checkpt blocks available = %d" TENDSTR),
+@@ -30,53 +26,56 @@ static int yaffs_CheckpointSpaceOk(yaffs
+ }
+
+
+-static int yaffs_CheckpointErase(yaffs_Device *dev)
++static int yaffs_checkpt_erase(yaffs_dev_t *dev)
+ {
+ int i;
+
+- if (!dev->eraseBlockInNAND)
++ if (!dev->param.erase_fn)
+ return 0;
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checking blocks %d to %d"TENDSTR),
+- dev->internalStartBlock, dev->internalEndBlock));
++ dev->internal_start_block, dev->internal_end_block));
+
+- for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
+- if (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT) {
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, i);
++ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("erasing checkpt block %d"TENDSTR), i));
+- if (dev->eraseBlockInNAND(dev, i - dev->blockOffset /* realign */)) {
+- bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
+- dev->nErasedBlocks++;
+- dev->nFreeChunks += dev->nChunksPerBlock;
++
++ dev->n_erasures++;
++
++ if (dev->param.erase_fn(dev, i - dev->block_offset /* realign */)) {
++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ dev->n_free_chunks += dev->param.chunks_per_block;
+ } else {
+- dev->markNANDBlockBad(dev, i);
+- bi->blockState = YAFFS_BLOCK_STATE_DEAD;
++ dev->param.bad_block_fn(dev, i);
++ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+ }
+
+- dev->blocksInCheckpoint = 0;
++ dev->blocks_in_checkpt = 0;
+
+ return 1;
+ }
+
+
+-static void yaffs_CheckpointFindNextErasedBlock(yaffs_Device *dev)
++static void yaffs2_checkpt_find_erased_block(yaffs_dev_t *dev)
+ {
+ int i;
+- int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
++ int blocksAvailable = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+ T(YAFFS_TRACE_CHECKPOINT,
+ (TSTR("allocating checkpt block: erased %d reserved %d avail %d next %d "TENDSTR),
+- dev->nErasedBlocks, dev->nReservedBlocks, blocksAvailable, dev->checkpointNextBlock));
++ dev->n_erased_blocks, dev->param.n_reserved_blocks, blocksAvailable, dev->checkpt_next_block));
+
+- if (dev->checkpointNextBlock >= 0 &&
+- dev->checkpointNextBlock <= dev->internalEndBlock &&
++ if (dev->checkpt_next_block >= 0 &&
++ dev->checkpt_next_block <= dev->internal_end_block &&
+ blocksAvailable > 0) {
+
+- for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
+- if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) {
+- dev->checkpointNextBlock = i + 1;
+- dev->checkpointCurrentBlock = i;
++ for (i = dev->checkpt_next_block; i <= dev->internal_end_block; i++) {
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, i);
++ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++ dev->checkpt_next_block = i + 1;
++ dev->checkpt_cur_block = i;
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("allocating checkpt block %d"TENDSTR), i));
+ return;
+ }
+@@ -84,34 +83,34 @@ static void yaffs_CheckpointFindNextEras
+ }
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("out of checkpt blocks"TENDSTR)));
+
+- dev->checkpointNextBlock = -1;
+- dev->checkpointCurrentBlock = -1;
++ dev->checkpt_next_block = -1;
++ dev->checkpt_cur_block = -1;
+ }
+
+-static void yaffs_CheckpointFindNextCheckpointBlock(yaffs_Device *dev)
++static void yaffs2_checkpt_find_block(yaffs_dev_t *dev)
+ {
+ int i;
+- yaffs_ExtendedTags tags;
++ yaffs_ext_tags tags;
+
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: start: blocks %d next %d" TENDSTR),
+- dev->blocksInCheckpoint, dev->checkpointNextBlock));
++ dev->blocks_in_checkpt, dev->checkpt_next_block));
+
+- if (dev->blocksInCheckpoint < dev->checkpointMaxBlocks)
+- for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) {
+- int chunk = i * dev->nChunksPerBlock;
+- int realignedChunk = chunk - dev->chunkOffset;
++ if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
++ for (i = dev->checkpt_next_block; i <= dev->internal_end_block; i++) {
++ int chunk = i * dev->param.chunks_per_block;
++ int realignedChunk = chunk - dev->chunk_offset;
+
+- dev->readChunkWithTagsFromNAND(dev, realignedChunk,
++ dev->param.read_chunk_tags_fn(dev, realignedChunk,
+ NULL, &tags);
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: search: block %d oid %d seq %d eccr %d" TENDSTR),
+- i, tags.objectId, tags.sequenceNumber, tags.eccResult));
++ i, tags.obj_id, tags.seq_number, tags.ecc_result));
+
+- if (tags.sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
++ if (tags.seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ /* Right kind of block */
+- dev->checkpointNextBlock = tags.objectId;
+- dev->checkpointCurrentBlock = i;
+- dev->checkpointBlockList[dev->blocksInCheckpoint] = i;
+- dev->blocksInCheckpoint++;
++ dev->checkpt_next_block = tags.obj_id;
++ dev->checkpt_cur_block = i;
++ dev->checkpt_block_list[dev->blocks_in_checkpt] = i;
++ dev->blocks_in_checkpt++;
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("found checkpt block %d"TENDSTR), i));
+ return;
+ }
+@@ -119,122 +118,127 @@ static void yaffs_CheckpointFindNextChec
+
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("found no more checkpt blocks"TENDSTR)));
+
+- dev->checkpointNextBlock = -1;
+- dev->checkpointCurrentBlock = -1;
++ dev->checkpt_next_block = -1;
++ dev->checkpt_cur_block = -1;
+ }
+
+
+-int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting)
++int yaffs2_checkpt_open(yaffs_dev_t *dev, int forWriting)
+ {
+
++
++ dev->checkpt_open_write = forWriting;
++
+ /* Got the functions we need? */
+- if (!dev->writeChunkWithTagsToNAND ||
+- !dev->readChunkWithTagsFromNAND ||
+- !dev->eraseBlockInNAND ||
+- !dev->markNANDBlockBad)
++ if (!dev->param.write_chunk_tags_fn ||
++ !dev->param.read_chunk_tags_fn ||
++ !dev->param.erase_fn ||
++ !dev->param.bad_block_fn)
+ return 0;
+
+- if (forWriting && !yaffs_CheckpointSpaceOk(dev))
++ if (forWriting && !yaffs2_checkpt_space_ok(dev))
+ return 0;
+
+- if (!dev->checkpointBuffer)
+- dev->checkpointBuffer = YMALLOC_DMA(dev->totalBytesPerChunk);
+- if (!dev->checkpointBuffer)
++ if (!dev->checkpt_buffer)
++ dev->checkpt_buffer = YMALLOC_DMA(dev->param.total_bytes_per_chunk);
++ if (!dev->checkpt_buffer)
+ return 0;
+
+
+- dev->checkpointPageSequence = 0;
+-
+- dev->checkpointOpenForWrite = forWriting;
+-
+- dev->checkpointByteCount = 0;
+- dev->checkpointSum = 0;
+- dev->checkpointXor = 0;
+- dev->checkpointCurrentBlock = -1;
+- dev->checkpointCurrentChunk = -1;
+- dev->checkpointNextBlock = dev->internalStartBlock;
++ dev->checkpt_page_seq = 0;
++ dev->checkpt_byte_count = 0;
++ dev->checkpt_sum = 0;
++ dev->checkpt_xor = 0;
++ dev->checkpt_cur_block = -1;
++ dev->checkpt_cur_chunk = -1;
++ dev->checkpt_next_block = dev->internal_start_block;
+
+ /* Erase all the blocks in the checkpoint area */
+ if (forWriting) {
+- memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk);
+- dev->checkpointByteOffset = 0;
+- return yaffs_CheckpointErase(dev);
++ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
++ dev->checkpt_byte_offs = 0;
++ return yaffs_checkpt_erase(dev);
+ } else {
+ int i;
+ /* Set to a value that will kick off a read */
+- dev->checkpointByteOffset = dev->nDataBytesPerChunk;
++ dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
+ /* A checkpoint block list of 1 checkpoint block per 16 block is (hopefully)
+ * going to be way more than we need */
+- dev->blocksInCheckpoint = 0;
+- dev->checkpointMaxBlocks = (dev->internalEndBlock - dev->internalStartBlock)/16 + 2;
+- dev->checkpointBlockList = YMALLOC(sizeof(int) * dev->checkpointMaxBlocks);
+- for (i = 0; i < dev->checkpointMaxBlocks; i++)
+- dev->checkpointBlockList[i] = -1;
++ dev->blocks_in_checkpt = 0;
++ dev->checkpt_max_blocks = (dev->internal_end_block - dev->internal_start_block)/16 + 2;
++ dev->checkpt_block_list = YMALLOC(sizeof(int) * dev->checkpt_max_blocks);
++ if(!dev->checkpt_block_list)
++ return 0;
++
++ for (i = 0; i < dev->checkpt_max_blocks; i++)
++ dev->checkpt_block_list[i] = -1;
+ }
+
+ return 1;
+ }
+
+-int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum)
++int yaffs2_get_checkpt_sum(yaffs_dev_t *dev, __u32 *sum)
+ {
+ __u32 compositeSum;
+- compositeSum = (dev->checkpointSum << 8) | (dev->checkpointXor & 0xFF);
++ compositeSum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xFF);
+ *sum = compositeSum;
+ return 1;
+ }
+
+-static int yaffs_CheckpointFlushBuffer(yaffs_Device *dev)
++static int yaffs2_checkpt_flush_buffer(yaffs_dev_t *dev)
+ {
+ int chunk;
+ int realignedChunk;
+
+- yaffs_ExtendedTags tags;
++ yaffs_ext_tags tags;
+
+- if (dev->checkpointCurrentBlock < 0) {
+- yaffs_CheckpointFindNextErasedBlock(dev);
+- dev->checkpointCurrentChunk = 0;
++ if (dev->checkpt_cur_block < 0) {
++ yaffs2_checkpt_find_erased_block(dev);
++ dev->checkpt_cur_chunk = 0;
+ }
+
+- if (dev->checkpointCurrentBlock < 0)
++ if (dev->checkpt_cur_block < 0)
+ return 0;
+
+- tags.chunkDeleted = 0;
+- tags.objectId = dev->checkpointNextBlock; /* Hint to next place to look */
+- tags.chunkId = dev->checkpointPageSequence + 1;
+- tags.sequenceNumber = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+- tags.byteCount = dev->nDataBytesPerChunk;
+- if (dev->checkpointCurrentChunk == 0) {
++ tags.is_deleted = 0;
++ tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
++ tags.chunk_id = dev->checkpt_page_seq + 1;
++ tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
++ tags.n_bytes = dev->data_bytes_per_chunk;
++ if (dev->checkpt_cur_chunk == 0) {
+ /* First chunk we write for the block? Set block state to
+ checkpoint */
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->checkpointCurrentBlock);
+- bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
+- dev->blocksInCheckpoint++;
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, dev->checkpt_cur_block);
++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ dev->blocks_in_checkpt++;
+ }
+
+- chunk = dev->checkpointCurrentBlock * dev->nChunksPerBlock + dev->checkpointCurrentChunk;
++ chunk = dev->checkpt_cur_block * dev->param.chunks_per_block + dev->checkpt_cur_chunk;
+
+
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint wite buffer nand %d(%d:%d) objid %d chId %d" TENDSTR),
+- chunk, dev->checkpointCurrentBlock, dev->checkpointCurrentChunk, tags.objectId, tags.chunkId));
++ chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk, tags.obj_id, tags.chunk_id));
+
+- realignedChunk = chunk - dev->chunkOffset;
++ realignedChunk = chunk - dev->chunk_offset;
+
+- dev->writeChunkWithTagsToNAND(dev, realignedChunk,
+- dev->checkpointBuffer, &tags);
+- dev->checkpointByteOffset = 0;
+- dev->checkpointPageSequence++;
+- dev->checkpointCurrentChunk++;
+- if (dev->checkpointCurrentChunk >= dev->nChunksPerBlock) {
+- dev->checkpointCurrentChunk = 0;
+- dev->checkpointCurrentBlock = -1;
++ dev->n_page_writes++;
++
++ dev->param.write_chunk_tags_fn(dev, realignedChunk,
++ dev->checkpt_buffer, &tags);
++ dev->checkpt_byte_offs = 0;
++ dev->checkpt_page_seq++;
++ dev->checkpt_cur_chunk++;
++ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
++ dev->checkpt_cur_chunk = 0;
++ dev->checkpt_cur_block = -1;
+ }
+- memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk);
++ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+
+ return 1;
+ }
+
+
+-int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes)
++int yaffs2_checkpt_wr(yaffs_dev_t *dev, const void *data, int n_bytes)
+ {
+ int i = 0;
+ int ok = 1;
+@@ -244,36 +248,36 @@ int yaffs_CheckpointWrite(yaffs_Device *
+
+
+
+- if (!dev->checkpointBuffer)
++ if (!dev->checkpt_buffer)
+ return 0;
+
+- if (!dev->checkpointOpenForWrite)
++ if (!dev->checkpt_open_write)
+ return -1;
+
+- while (i < nBytes && ok) {
+- dev->checkpointBuffer[dev->checkpointByteOffset] = *dataBytes;
+- dev->checkpointSum += *dataBytes;
+- dev->checkpointXor ^= *dataBytes;
++ while (i < n_bytes && ok) {
++ dev->checkpt_buffer[dev->checkpt_byte_offs] = *dataBytes;
++ dev->checkpt_sum += *dataBytes;
++ dev->checkpt_xor ^= *dataBytes;
+
+- dev->checkpointByteOffset++;
++ dev->checkpt_byte_offs++;
+ i++;
+ dataBytes++;
+- dev->checkpointByteCount++;
++ dev->checkpt_byte_count++;
+
+
+- if (dev->checkpointByteOffset < 0 ||
+- dev->checkpointByteOffset >= dev->nDataBytesPerChunk)
+- ok = yaffs_CheckpointFlushBuffer(dev);
++ if (dev->checkpt_byte_offs < 0 ||
++ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
++ ok = yaffs2_checkpt_flush_buffer(dev);
+ }
+
+ return i;
+ }
+
+-int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes)
++int yaffs2_checkpt_rd(yaffs_dev_t *dev, void *data, int n_bytes)
+ {
+ int i = 0;
+ int ok = 1;
+- yaffs_ExtendedTags tags;
++ yaffs_ext_tags tags;
+
+
+ int chunk;
+@@ -281,113 +285,116 @@ int yaffs_CheckpointRead(yaffs_Device *d
+
+ __u8 *dataBytes = (__u8 *)data;
+
+- if (!dev->checkpointBuffer)
++ if (!dev->checkpt_buffer)
+ return 0;
+
+- if (dev->checkpointOpenForWrite)
++ if (dev->checkpt_open_write)
+ return -1;
+
+- while (i < nBytes && ok) {
++ while (i < n_bytes && ok) {
+
+
+- if (dev->checkpointByteOffset < 0 ||
+- dev->checkpointByteOffset >= dev->nDataBytesPerChunk) {
++ if (dev->checkpt_byte_offs < 0 ||
++ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+
+- if (dev->checkpointCurrentBlock < 0) {
+- yaffs_CheckpointFindNextCheckpointBlock(dev);
+- dev->checkpointCurrentChunk = 0;
++ if (dev->checkpt_cur_block < 0) {
++ yaffs2_checkpt_find_block(dev);
++ dev->checkpt_cur_chunk = 0;
+ }
+
+- if (dev->checkpointCurrentBlock < 0)
++ if (dev->checkpt_cur_block < 0)
+ ok = 0;
+ else {
+- chunk = dev->checkpointCurrentBlock *
+- dev->nChunksPerBlock +
+- dev->checkpointCurrentChunk;
+-
+- realignedChunk = chunk - dev->chunkOffset;
++ chunk = dev->checkpt_cur_block *
++ dev->param.chunks_per_block +
++ dev->checkpt_cur_chunk;
++
++ realignedChunk = chunk - dev->chunk_offset;
++
++ dev->n_page_reads++;
+
+ /* read in the next chunk */
+ /* printf("read checkpoint page %d\n",dev->checkpointPage); */
+- dev->readChunkWithTagsFromNAND(dev,
++ dev->param.read_chunk_tags_fn(dev,
+ realignedChunk,
+- dev->checkpointBuffer,
++ dev->checkpt_buffer,
+ &tags);
+
+- if (tags.chunkId != (dev->checkpointPageSequence + 1) ||
+- tags.eccResult > YAFFS_ECC_RESULT_FIXED ||
+- tags.sequenceNumber != YAFFS_SEQUENCE_CHECKPOINT_DATA)
++ if (tags.chunk_id != (dev->checkpt_page_seq + 1) ||
++ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
++ tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ ok = 0;
+
+- dev->checkpointByteOffset = 0;
+- dev->checkpointPageSequence++;
+- dev->checkpointCurrentChunk++;
++ dev->checkpt_byte_offs = 0;
++ dev->checkpt_page_seq++;
++ dev->checkpt_cur_chunk++;
+
+- if (dev->checkpointCurrentChunk >= dev->nChunksPerBlock)
+- dev->checkpointCurrentBlock = -1;
++ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block)
++ dev->checkpt_cur_block = -1;
+ }
+ }
+
+ if (ok) {
+- *dataBytes = dev->checkpointBuffer[dev->checkpointByteOffset];
+- dev->checkpointSum += *dataBytes;
+- dev->checkpointXor ^= *dataBytes;
+- dev->checkpointByteOffset++;
++ *dataBytes = dev->checkpt_buffer[dev->checkpt_byte_offs];
++ dev->checkpt_sum += *dataBytes;
++ dev->checkpt_xor ^= *dataBytes;
++ dev->checkpt_byte_offs++;
+ i++;
+ dataBytes++;
+- dev->checkpointByteCount++;
++ dev->checkpt_byte_count++;
+ }
+ }
+
+ return i;
+ }
+
+-int yaffs_CheckpointClose(yaffs_Device *dev)
++int yaffs_checkpt_close(yaffs_dev_t *dev)
+ {
+
+- if (dev->checkpointOpenForWrite) {
+- if (dev->checkpointByteOffset != 0)
+- yaffs_CheckpointFlushBuffer(dev);
+- } else {
++ if (dev->checkpt_open_write) {
++ if (dev->checkpt_byte_offs != 0)
++ yaffs2_checkpt_flush_buffer(dev);
++ } else if(dev->checkpt_block_list){
+ int i;
+- for (i = 0; i < dev->blocksInCheckpoint && dev->checkpointBlockList[i] >= 0; i++) {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->checkpointBlockList[i]);
+- if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY)
+- bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
++ for (i = 0; i < dev->blocks_in_checkpt && dev->checkpt_block_list[i] >= 0; i++) {
++ int blk = dev->checkpt_block_list[i];
++ yaffs_block_info_t *bi = NULL;
++ if( dev->internal_start_block <= blk && blk <= dev->internal_end_block)
++ bi = yaffs_get_block_info(dev, blk);
++ if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ else {
+ /* Todo this looks odd... */
+ }
+ }
+- YFREE(dev->checkpointBlockList);
+- dev->checkpointBlockList = NULL;
++ YFREE(dev->checkpt_block_list);
++ dev->checkpt_block_list = NULL;
+ }
+
+- dev->nFreeChunks -= dev->blocksInCheckpoint * dev->nChunksPerBlock;
+- dev->nErasedBlocks -= dev->blocksInCheckpoint;
++ dev->n_free_chunks -= dev->blocks_in_checkpt * dev->param.chunks_per_block;
++ dev->n_erased_blocks -= dev->blocks_in_checkpt;
+
+
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint byte count %d" TENDSTR),
+- dev->checkpointByteCount));
++ dev->checkpt_byte_count));
+
+- if (dev->checkpointBuffer) {
++ if (dev->checkpt_buffer) {
+ /* free the buffer */
+- YFREE(dev->checkpointBuffer);
+- dev->checkpointBuffer = NULL;
++ YFREE(dev->checkpt_buffer);
++ dev->checkpt_buffer = NULL;
+ return 1;
+ } else
+ return 0;
+ }
+
+-int yaffs_CheckpointInvalidateStream(yaffs_Device *dev)
++int yaffs2_checkpt_invalidate_stream(yaffs_dev_t *dev)
+ {
+- /* Erase the first checksum block */
+-
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint invalidate"TENDSTR)));
++ /* Erase the checkpoint data */
+
+- if (!yaffs_CheckpointSpaceOk(dev))
+- return 0;
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint invalidate of %d blocks"TENDSTR),
++ dev->blocks_in_checkpt));
+
+- return yaffs_CheckpointErase(dev);
++ return yaffs_checkpt_erase(dev);
+ }
+
+
+--- a/fs/yaffs2/yaffs_checkptrw.h
++++ b/fs/yaffs2/yaffs_checkptrw.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -18,18 +18,17 @@
+
+ #include "yaffs_guts.h"
+
+-int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting);
++int yaffs2_checkpt_open(yaffs_dev_t *dev, int forWriting);
+
+-int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes);
++int yaffs2_checkpt_wr(yaffs_dev_t *dev, const void *data, int n_bytes);
+
+-int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes);
++int yaffs2_checkpt_rd(yaffs_dev_t *dev, void *data, int n_bytes);
+
+-int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum);
++int yaffs2_get_checkpt_sum(yaffs_dev_t *dev, __u32 *sum);
+
+-int yaffs_CheckpointClose(yaffs_Device *dev);
++int yaffs_checkpt_close(yaffs_dev_t *dev);
+
+-int yaffs_CheckpointInvalidateStream(yaffs_Device *dev);
++int yaffs2_checkpt_invalidate_stream(yaffs_dev_t *dev);
+
+
+ #endif
+-
+--- a/fs/yaffs2/yaffs_ecc.c
++++ b/fs/yaffs2/yaffs_ecc.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -28,9 +28,6 @@
+ * this bytes influence on the line parity.
+ */
+
+-const char *yaffs_ecc_c_version =
+- "$Id: yaffs_ecc.c,v 1.11 2009-03-06 17:20:50 wookey Exp $";
+-
+ #include "yportenv.h"
+
+ #include "yaffs_ecc.h"
+@@ -72,7 +69,7 @@ static const unsigned char column_parity
+
+ /* Count the bits in an unsigned char or a U32 */
+
+-static int yaffs_CountBits(unsigned char x)
++static int yaffs_count_bits(unsigned char x)
+ {
+ int r = 0;
+ while (x) {
+@@ -83,7 +80,7 @@ static int yaffs_CountBits(unsigned char
+ return r;
+ }
+
+-static int yaffs_CountBits32(unsigned x)
++static int yaffs_count_bits32(unsigned x)
+ {
+ int r = 0;
+ while (x) {
+@@ -95,7 +92,7 @@ static int yaffs_CountBits32(unsigned x)
+ }
+
+ /* Calculate the ECC for a 256-byte block of data */
+-void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc)
++void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc)
+ {
+ unsigned int i;
+
+@@ -166,7 +163,7 @@ void yaffs_ECCCalculate(const unsigned c
+
+ /* Correct the ECC on a 256 byte block of data */
+
+-int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc)
+ {
+ unsigned char d0, d1, d2; /* deltas */
+@@ -226,9 +223,9 @@ int yaffs_ECCCorrect(unsigned char *data
+ return 1; /* Corrected the error */
+ }
+
+- if ((yaffs_CountBits(d0) +
+- yaffs_CountBits(d1) +
+- yaffs_CountBits(d2)) == 1) {
++ if ((yaffs_count_bits(d0) +
++ yaffs_count_bits(d1) +
++ yaffs_count_bits(d2)) == 1) {
+ /* Reccoverable error in ecc */
+
+ read_ecc[0] = test_ecc[0];
+@@ -248,7 +245,7 @@ int yaffs_ECCCorrect(unsigned char *data
+ /*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+-void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ yaffs_ECCOther *eccOther)
+ {
+ unsigned int i;
+@@ -258,7 +255,7 @@ void yaffs_ECCCalculateOther(const unsig
+ unsigned line_parity_prime = 0;
+ unsigned char b;
+
+- for (i = 0; i < nBytes; i++) {
++ for (i = 0; i < n_bytes; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+@@ -275,7 +272,7 @@ void yaffs_ECCCalculateOther(const unsig
+ eccOther->lineParityPrime = line_parity_prime;
+ }
+
+-int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ yaffs_ECCOther *read_ecc,
+ const yaffs_ECCOther *test_ecc)
+ {
+@@ -304,7 +301,7 @@ int yaffs_ECCCorrectOther(unsigned char
+ if (cDelta & 0x02)
+ bit |= 0x01;
+
+- if (lDelta >= nBytes)
++ if (lDelta >= n_bytes)
+ return -1;
+
+ data[lDelta] ^= (1 << bit);
+@@ -312,8 +309,8 @@ int yaffs_ECCCorrectOther(unsigned char
+ return 1; /* corrected */
+ }
+
+- if ((yaffs_CountBits32(lDelta) + yaffs_CountBits32(lDeltaPrime) +
+- yaffs_CountBits(cDelta)) == 1) {
++ if ((yaffs_count_bits32(lDelta) + yaffs_count_bits32(lDeltaPrime) +
++ yaffs_count_bits(cDelta)) == 1) {
+ /* Reccoverable error in ecc */
+
+ *read_ecc = *test_ecc;
+--- a/fs/yaffs2/yaffs_ecc.h
++++ b/fs/yaffs2/yaffs_ecc.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -32,13 +32,13 @@ typedef struct {
+ unsigned lineParityPrime;
+ } yaffs_ECCOther;
+
+-void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc);
+-int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
++void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc);
++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc);
+
+-void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ yaffs_ECCOther *ecc);
+-int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ yaffs_ECCOther *read_ecc,
+ const yaffs_ECCOther *test_ecc);
+ #endif
+--- a/fs/yaffs2/yaffs_fs.c
++++ /dev/null
+@@ -1,2529 +0,0 @@
+-/*
+- * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+- *
+- * Copyright (C) 2002-2009 Aleph One Ltd.
+- * for Toby Churchill Ltd and Brightstar Engineering
+- *
+- * Created by Charles Manning <charles@aleph1.co.uk>
+- * Acknowledgements:
+- * Luc van OostenRyck for numerous patches.
+- * Nick Bane for numerous patches.
+- * Nick Bane for 2.5/2.6 integration.
+- * Andras Toth for mknod rdev issue.
+- * Michael Fischer for finding the problem with inode inconsistency.
+- * Some code bodily lifted from JFFS
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-/*
+- *
+- * This is the file system front-end to YAFFS that hooks it up to
+- * the VFS.
+- *
+- * Special notes:
+- * >> 2.4: sb->u.generic_sbp points to the yaffs_Device associated with
+- * this superblock
+- * >> 2.6: sb->s_fs_info points to the yaffs_Device associated with this
+- * superblock
+- * >> inode->u.generic_ip points to the associated yaffs_Object.
+- */
+-
+-const char *yaffs_fs_c_version =
+- "$Id: yaffs_fs.c,v 1.79 2009-03-17 01:12:00 wookey Exp $";
+-extern const char *yaffs_guts_c_version;
+-
+-#include <linux/version.h>
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+-#include <linux/config.h>
+-#endif
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <linux/init.h>
+-#include <linux/fs.h>
+-#include <linux/proc_fs.h>
+-#include <linux/smp_lock.h>
+-#include <linux/pagemap.h>
+-#include <linux/mtd/mtd.h>
+-#include <linux/interrupt.h>
+-#include <linux/string.h>
+-#include <linux/ctype.h>
+-
+-#include "asm/div64.h"
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-
+-#include <linux/statfs.h> /* Added NCB 15-8-2003 */
+-#include <linux/statfs.h>
+-#define UnlockPage(p) unlock_page(p)
+-#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+-
+-/* FIXME: use sb->s_id instead ? */
+-#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
+-
+-#else
+-
+-#include <linux/locks.h>
+-#define BDEVNAME_SIZE 0
+-#define yaffs_devname(sb, buf) kdevname(sb->s_dev)
+-
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
+-/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
+-#define __user
+-#endif
+-
+-#endif
+-
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+-#define YPROC_ROOT (&proc_root)
+-#else
+-#define YPROC_ROOT NULL
+-#endif
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-#define WRITE_SIZE_STR "writesize"
+-#define WRITE_SIZE(mtd) ((mtd)->writesize)
+-#else
+-#define WRITE_SIZE_STR "oobblock"
+-#define WRITE_SIZE(mtd) ((mtd)->oobblock)
+-#endif
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
+-#define YAFFS_USE_WRITE_BEGIN_END 1
+-#else
+-#define YAFFS_USE_WRITE_BEGIN_END 0
+-#endif
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
+-static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
+-{
+- uint64_t result = partition_size;
+- do_div(result, block_size);
+- return (uint32_t)result;
+-}
+-#else
+-#define YCALCBLOCKS(s, b) ((s)/(b))
+-#endif
+-
+-#include <linux/uaccess.h>
+-
+-#include "yportenv.h"
+-#include "yaffs_guts.h"
+-
+-#include <linux/mtd/mtd.h>
+-#include "yaffs_mtdif.h"
+-#include "yaffs_mtdif1.h"
+-#include "yaffs_mtdif2.h"
+-
+-unsigned int yaffs_traceMask = YAFFS_TRACE_BAD_BLOCKS;
+-unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
+-unsigned int yaffs_auto_checkpoint = 1;
+-
+-/* Module Parameters */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-module_param(yaffs_traceMask, uint, 0644);
+-module_param(yaffs_wr_attempts, uint, 0644);
+-module_param(yaffs_auto_checkpoint, uint, 0644);
+-#else
+-MODULE_PARM(yaffs_traceMask, "i");
+-MODULE_PARM(yaffs_wr_attempts, "i");
+-MODULE_PARM(yaffs_auto_checkpoint, "i");
+-#endif
+-
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
+-/* use iget and read_inode */
+-#define Y_IGET(sb, inum) iget((sb), (inum))
+-static void yaffs_read_inode(struct inode *inode);
+-
+-#else
+-/* Call local equivalent */
+-#define YAFFS_USE_OWN_IGET
+-#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
+-
+-static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino);
+-#endif
+-
+-/*#define T(x) printk x */
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+-#define yaffs_InodeToObjectLV(iptr) ((iptr)->i_private)
+-#else
+-#define yaffs_InodeToObjectLV(iptr) ((iptr)->u.generic_ip)
+-#endif
+-
+-#define yaffs_InodeToObject(iptr) ((yaffs_Object *)(yaffs_InodeToObjectLV(iptr)))
+-#define yaffs_DentryToObject(dptr) yaffs_InodeToObject((dptr)->d_inode)
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-#define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->s_fs_info)
+-#else
+-#define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->u.generic_sbp)
+-#endif
+-
+-static void yaffs_put_super(struct super_block *sb);
+-
+-static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+- loff_t *pos);
+-static ssize_t yaffs_hold_space(struct file *f);
+-static void yaffs_release_space(struct file *f);
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_file_flush(struct file *file, fl_owner_t id);
+-#else
+-static int yaffs_file_flush(struct file *file);
+-#endif
+-
+-static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+- int datasync);
+-
+-static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+- struct nameidata *n);
+-static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+- struct nameidata *n);
+-#else
+-static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode);
+-static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry);
+-#endif
+-static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+- struct dentry *dentry);
+-static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
+-static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+- const char *symname);
+-static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- dev_t dev);
+-#else
+-static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- int dev);
+-#endif
+-static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+- struct inode *new_dir, struct dentry *new_dentry);
+-static int yaffs_setattr(struct dentry *dentry, struct iattr *attr);
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_sync_fs(struct super_block *sb, int wait);
+-static void yaffs_write_super(struct super_block *sb);
+-#else
+-static int yaffs_sync_fs(struct super_block *sb);
+-static int yaffs_write_super(struct super_block *sb);
+-#endif
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf);
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf);
+-#else
+-static int yaffs_statfs(struct super_block *sb, struct statfs *buf);
+-#endif
+-
+-#ifdef YAFFS_HAS_PUT_INODE
+-static void yaffs_put_inode(struct inode *inode);
+-#endif
+-
+-static void yaffs_delete_inode(struct inode *);
+-static void yaffs_clear_inode(struct inode *);
+-
+-static int yaffs_readpage(struct file *file, struct page *page);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_writepage(struct page *page, struct writeback_control *wbc);
+-#else
+-static int yaffs_writepage(struct page *page);
+-#endif
+-
+-
+-#if (YAFFS_USE_WRITE_BEGIN_END != 0)
+-static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned flags,
+- struct page **pagep, void **fsdata);
+-static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned copied,
+- struct page *pg, void *fsdadata);
+-#else
+-static int yaffs_prepare_write(struct file *f, struct page *pg,
+- unsigned offset, unsigned to);
+-static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+- unsigned to);
+-
+-#endif
+-
+-static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
+- int buflen);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+-static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
+-#else
+-static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
+-#endif
+-
+-static struct address_space_operations yaffs_file_address_operations = {
+- .readpage = yaffs_readpage,
+- .writepage = yaffs_writepage,
+-#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+- .write_begin = yaffs_write_begin,
+- .write_end = yaffs_write_end,
+-#else
+- .prepare_write = yaffs_prepare_write,
+- .commit_write = yaffs_commit_write,
+-#endif
+-};
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
+-static const struct file_operations yaffs_file_operations = {
+- .read = do_sync_read,
+- .write = do_sync_write,
+- .aio_read = generic_file_aio_read,
+- .aio_write = generic_file_aio_write,
+- .mmap = generic_file_mmap,
+- .flush = yaffs_file_flush,
+- .fsync = yaffs_sync_object,
+- .splice_read = generic_file_splice_read,
+- .splice_write = generic_file_splice_write,
+- .llseek = generic_file_llseek,
+-};
+-
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+-
+-static const struct file_operations yaffs_file_operations = {
+- .read = do_sync_read,
+- .write = do_sync_write,
+- .aio_read = generic_file_aio_read,
+- .aio_write = generic_file_aio_write,
+- .mmap = generic_file_mmap,
+- .flush = yaffs_file_flush,
+- .fsync = yaffs_sync_object,
+- .sendfile = generic_file_sendfile,
+-};
+-
+-#else
+-
+-static const struct file_operations yaffs_file_operations = {
+- .read = generic_file_read,
+- .write = generic_file_write,
+- .mmap = generic_file_mmap,
+- .flush = yaffs_file_flush,
+- .fsync = yaffs_sync_object,
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+- .sendfile = generic_file_sendfile,
+-#endif
+-};
+-#endif
+-
+-static const struct inode_operations yaffs_file_inode_operations = {
+- .setattr = yaffs_setattr,
+-};
+-
+-static const struct inode_operations yaffs_symlink_inode_operations = {
+- .readlink = yaffs_readlink,
+- .follow_link = yaffs_follow_link,
+- .setattr = yaffs_setattr,
+-};
+-
+-static const struct inode_operations yaffs_dir_inode_operations = {
+- .create = yaffs_create,
+- .lookup = yaffs_lookup,
+- .link = yaffs_link,
+- .unlink = yaffs_unlink,
+- .symlink = yaffs_symlink,
+- .mkdir = yaffs_mkdir,
+- .rmdir = yaffs_unlink,
+- .mknod = yaffs_mknod,
+- .rename = yaffs_rename,
+- .setattr = yaffs_setattr,
+-};
+-
+-static const struct file_operations yaffs_dir_operations = {
+- .read = generic_read_dir,
+- .readdir = yaffs_readdir,
+- .fsync = yaffs_sync_object,
+-};
+-
+-static const struct super_operations yaffs_super_ops = {
+- .statfs = yaffs_statfs,
+-
+-#ifndef YAFFS_USE_OWN_IGET
+- .read_inode = yaffs_read_inode,
+-#endif
+-#ifdef YAFFS_HAS_PUT_INODE
+- .put_inode = yaffs_put_inode,
+-#endif
+- .put_super = yaffs_put_super,
+- .delete_inode = yaffs_delete_inode,
+- .clear_inode = yaffs_clear_inode,
+- .sync_fs = yaffs_sync_fs,
+- .write_super = yaffs_write_super,
+-};
+-
+-static void yaffs_GrossLock(yaffs_Device *dev)
+-{
+- T(YAFFS_TRACE_OS, ("yaffs locking %p\n", current));
+- down(&dev->grossLock);
+- T(YAFFS_TRACE_OS, ("yaffs locked %p\n", current));
+-}
+-
+-static void yaffs_GrossUnlock(yaffs_Device *dev)
+-{
+- T(YAFFS_TRACE_OS, ("yaffs unlocking %p\n", current));
+- up(&dev->grossLock);
+-}
+-
+-static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
+- int buflen)
+-{
+- unsigned char *alias;
+- int ret;
+-
+- yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry));
+-
+- yaffs_GrossUnlock(dev);
+-
+- if (!alias)
+- return -ENOMEM;
+-
+- ret = vfs_readlink(dentry, buffer, buflen, alias);
+- kfree(alias);
+- return ret;
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+-static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+-#else
+-static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+-#endif
+-{
+- unsigned char *alias;
+- int ret;
+- yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry));
+-
+- yaffs_GrossUnlock(dev);
+-
+- if (!alias) {
+- ret = -ENOMEM;
+- goto out;
+- }
+-
+- ret = vfs_follow_link(nd, alias);
+- kfree(alias);
+-out:
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+- return ERR_PTR(ret);
+-#else
+- return ret;
+-#endif
+-}
+-
+-struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+- yaffs_Object *obj);
+-
+-/*
+- * Lookup is used to find objects in the fs
+- */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-
+-static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+- struct nameidata *n)
+-#else
+-static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
+-#endif
+-{
+- yaffs_Object *obj;
+- struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */
+-
+- yaffs_Device *dev = yaffs_InodeToObject(dir)->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_lookup for %d:%s\n",
+- yaffs_InodeToObject(dir)->objectId, dentry->d_name.name));
+-
+- obj = yaffs_FindObjectByName(yaffs_InodeToObject(dir),
+- dentry->d_name.name);
+-
+- obj = yaffs_GetEquivalentObject(obj); /* in case it was a hardlink */
+-
+- /* Can't hold gross lock when calling yaffs_get_inode() */
+- yaffs_GrossUnlock(dev);
+-
+- if (obj) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_lookup found %d\n", obj->objectId));
+-
+- inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+-
+- if (inode) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_loookup dentry \n"));
+-/* #if 0 asserted by NCB for 2.5/6 compatability - falls through to
+- * d_add even if NULL inode */
+-#if 0
+- /*dget(dentry); // try to solve directory bug */
+- d_add(dentry, inode);
+-
+- /* return dentry; */
+- return NULL;
+-#endif
+- }
+-
+- } else {
+- T(YAFFS_TRACE_OS, ("yaffs_lookup not found\n"));
+-
+- }
+-
+-/* added NCB for 2.5/6 compatability - forces add even if inode is
+- * NULL which creates dentry hash */
+- d_add(dentry, inode);
+-
+- return NULL;
+-}
+-
+-
+-#ifdef YAFFS_HAS_PUT_INODE
+-
+-/* For now put inode is just for debugging
+- * Put inode is called when the inode **structure** is put.
+- */
+-static void yaffs_put_inode(struct inode *inode)
+-{
+- T(YAFFS_TRACE_OS,
+- ("yaffs_put_inode: ino %d, count %d\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count)));
+-
+-}
+-#endif
+-
+-/* clear is called to tell the fs to release any per-inode data it holds */
+-static void yaffs_clear_inode(struct inode *inode)
+-{
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+-
+- obj = yaffs_InodeToObject(inode);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_clear_inode: ino %d, count %d %s\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count),
+- obj ? "object exists" : "null object"));
+-
+- if (obj) {
+- dev = obj->myDev;
+- yaffs_GrossLock(dev);
+-
+- /* Clear the association between the inode and
+- * the yaffs_Object.
+- */
+- obj->myInode = NULL;
+- yaffs_InodeToObjectLV(inode) = NULL;
+-
+- /* If the object freeing was deferred, then the real
+- * free happens now.
+- * This should fix the inode inconsistency problem.
+- */
+-
+- yaffs_HandleDeferedFree(obj);
+-
+- yaffs_GrossUnlock(dev);
+- }
+-
+-}
+-
+-/* delete is called when the link count is zero and the inode
+- * is put (ie. nobody wants to know about it anymore, time to
+- * delete the file).
+- * NB Must call clear_inode()
+- */
+-static void yaffs_delete_inode(struct inode *inode)
+-{
+- yaffs_Object *obj = yaffs_InodeToObject(inode);
+- yaffs_Device *dev;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_delete_inode: ino %d, count %d %s\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count),
+- obj ? "object exists" : "null object"));
+-
+- if (obj) {
+- dev = obj->myDev;
+- yaffs_GrossLock(dev);
+- yaffs_DeleteObject(obj);
+- yaffs_GrossUnlock(dev);
+- }
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+- truncate_inode_pages(&inode->i_data, 0);
+-#endif
+- clear_inode(inode);
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_file_flush(struct file *file, fl_owner_t id)
+-#else
+-static int yaffs_file_flush(struct file *file)
+-#endif
+-{
+- yaffs_Object *obj = yaffs_DentryToObject(file->f_dentry);
+-
+- yaffs_Device *dev = obj->myDev;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_file_flush object %d (%s)\n", obj->objectId,
+- obj->dirty ? "dirty" : "clean"));
+-
+- yaffs_GrossLock(dev);
+-
+- yaffs_FlushFile(obj, 1);
+-
+- yaffs_GrossUnlock(dev);
+-
+- return 0;
+-}
+-
+-static int yaffs_readpage_nolock(struct file *f, struct page *pg)
+-{
+- /* Lifted from jffs2 */
+-
+- yaffs_Object *obj;
+- unsigned char *pg_buf;
+- int ret;
+-
+- yaffs_Device *dev;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_readpage at %08x, size %08x\n",
+- (unsigned)(pg->index << PAGE_CACHE_SHIFT),
+- (unsigned)PAGE_CACHE_SIZE));
+-
+- obj = yaffs_DentryToObject(f->f_dentry);
+-
+- dev = obj->myDev;
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+- BUG_ON(!PageLocked(pg));
+-#else
+- if (!PageLocked(pg))
+- PAGE_BUG(pg);
+-#endif
+-
+- pg_buf = kmap(pg);
+- /* FIXME: Can kmap fail? */
+-
+- yaffs_GrossLock(dev);
+-
+- ret = yaffs_ReadDataFromFile(obj, pg_buf,
+- pg->index << PAGE_CACHE_SHIFT,
+- PAGE_CACHE_SIZE);
+-
+- yaffs_GrossUnlock(dev);
+-
+- if (ret >= 0)
+- ret = 0;
+-
+- if (ret) {
+- ClearPageUptodate(pg);
+- SetPageError(pg);
+- } else {
+- SetPageUptodate(pg);
+- ClearPageError(pg);
+- }
+-
+- flush_dcache_page(pg);
+- kunmap(pg);
+-
+- T(YAFFS_TRACE_OS, ("yaffs_readpage done\n"));
+- return ret;
+-}
+-
+-static int yaffs_readpage_unlock(struct file *f, struct page *pg)
+-{
+- int ret = yaffs_readpage_nolock(f, pg);
+- UnlockPage(pg);
+- return ret;
+-}
+-
+-static int yaffs_readpage(struct file *f, struct page *pg)
+-{
+- return yaffs_readpage_unlock(f, pg);
+-}
+-
+-/* writepage inspired by/stolen from smbfs */
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+-#else
+-static int yaffs_writepage(struct page *page)
+-#endif
+-{
+- struct address_space *mapping = page->mapping;
+- loff_t offset = (loff_t) page->index << PAGE_CACHE_SHIFT;
+- struct inode *inode;
+- unsigned long end_index;
+- char *buffer;
+- yaffs_Object *obj;
+- int nWritten = 0;
+- unsigned nBytes;
+-
+- if (!mapping)
+- BUG();
+- inode = mapping->host;
+- if (!inode)
+- BUG();
+-
+- if (offset > inode->i_size) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_writepage at %08x, inode size = %08x!!!\n",
+- (unsigned)(page->index << PAGE_CACHE_SHIFT),
+- (unsigned)inode->i_size));
+- T(YAFFS_TRACE_OS,
+- (" -> don't care!!\n"));
+- unlock_page(page);
+- return 0;
+- }
+-
+- end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+-
+- /* easy case */
+- if (page->index < end_index)
+- nBytes = PAGE_CACHE_SIZE;
+- else
+- nBytes = inode->i_size & (PAGE_CACHE_SIZE - 1);
+-
+- get_page(page);
+-
+- buffer = kmap(page);
+-
+- obj = yaffs_InodeToObject(inode);
+- yaffs_GrossLock(obj->myDev);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_writepage at %08x, size %08x\n",
+- (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes));
+- T(YAFFS_TRACE_OS,
+- ("writepag0: obj = %05x, ino = %05x\n",
+- (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
+-
+- nWritten = yaffs_WriteDataToFile(obj, buffer,
+- page->index << PAGE_CACHE_SHIFT, nBytes, 0);
+-
+- T(YAFFS_TRACE_OS,
+- ("writepag1: obj = %05x, ino = %05x\n",
+- (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
+-
+- yaffs_GrossUnlock(obj->myDev);
+-
+- kunmap(page);
+- SetPageUptodate(page);
+- UnlockPage(page);
+- put_page(page);
+-
+- return (nWritten == nBytes) ? 0 : -ENOSPC;
+-}
+-
+-
+-#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+-static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned flags,
+- struct page **pagep, void **fsdata)
+-{
+- struct page *pg = NULL;
+- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+- uint32_t offset = pos & (PAGE_CACHE_SIZE - 1);
+- uint32_t to = offset + len;
+-
+- int ret = 0;
+- int space_held = 0;
+-
+- T(YAFFS_TRACE_OS, ("start yaffs_write_begin\n"));
+- /* Get a page */
+-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28)
+- pg = grab_cache_page_write_begin(mapping, index, flags);
+-#else
+- pg = __grab_cache_page(mapping, index);
+-#endif
+-
+- *pagep = pg;
+- if (!pg) {
+- ret = -ENOMEM;
+- goto out;
+- }
+- /* Get fs space */
+- space_held = yaffs_hold_space(filp);
+-
+- if (!space_held) {
+- ret = -ENOSPC;
+- goto out;
+- }
+-
+- /* Update page if required */
+-
+- if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
+- ret = yaffs_readpage_nolock(filp, pg);
+-
+- if (ret)
+- goto out;
+-
+- /* Happy path return */
+- T(YAFFS_TRACE_OS, ("end yaffs_write_begin - ok\n"));
+-
+- return 0;
+-
+-out:
+- T(YAFFS_TRACE_OS, ("end yaffs_write_begin fail returning %d\n", ret));
+- if (space_held)
+- yaffs_release_space(filp);
+- if (pg) {
+- unlock_page(pg);
+- page_cache_release(pg);
+- }
+- return ret;
+-}
+-
+-#else
+-
+-static int yaffs_prepare_write(struct file *f, struct page *pg,
+- unsigned offset, unsigned to)
+-{
+- T(YAFFS_TRACE_OS, ("yaffs_prepair_write\n"));
+-
+- if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
+- return yaffs_readpage_nolock(f, pg);
+- return 0;
+-}
+-#endif
+-
+-#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+-static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned copied,
+- struct page *pg, void *fsdadata)
+-{
+- int ret = 0;
+- void *addr, *kva;
+- uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
+-
+- kva = kmap(pg);
+- addr = kva + offset_into_page;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_write_end addr %x pos %x nBytes %d\n",
+- (unsigned) addr,
+- (int)pos, copied));
+-
+- ret = yaffs_file_write(filp, addr, copied, &pos);
+-
+- if (ret != copied) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_write_end not same size ret %d copied %d\n",
+- ret, copied));
+- SetPageError(pg);
+- ClearPageUptodate(pg);
+- } else {
+- SetPageUptodate(pg);
+- }
+-
+- kunmap(pg);
+-
+- yaffs_release_space(filp);
+- unlock_page(pg);
+- page_cache_release(pg);
+- return ret;
+-}
+-#else
+-
+-static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+- unsigned to)
+-{
+- void *addr, *kva;
+-
+- loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
+- int nBytes = to - offset;
+- int nWritten;
+-
+- unsigned spos = pos;
+- unsigned saddr;
+-
+- kva = kmap(pg);
+- addr = kva + offset;
+-
+- saddr = (unsigned) addr;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_commit_write addr %x pos %x nBytes %d\n",
+- saddr, spos, nBytes));
+-
+- nWritten = yaffs_file_write(f, addr, nBytes, &pos);
+-
+- if (nWritten != nBytes) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_commit_write not same size nWritten %d nBytes %d\n",
+- nWritten, nBytes));
+- SetPageError(pg);
+- ClearPageUptodate(pg);
+- } else {
+- SetPageUptodate(pg);
+- }
+-
+- kunmap(pg);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_commit_write returning %d\n",
+- nWritten == nBytes ? 0 : nWritten));
+-
+- return nWritten == nBytes ? 0 : nWritten;
+-}
+-#endif
+-
+-
+-static void yaffs_FillInodeFromObject(struct inode *inode, yaffs_Object *obj)
+-{
+- if (inode && obj) {
+-
+-
+- /* Check mode against the variant type and attempt to repair if broken. */
+- __u32 mode = obj->yst_mode;
+- switch (obj->variantType) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- if (!S_ISREG(mode)) {
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFREG;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- if (!S_ISLNK(mode)) {
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFLNK;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- if (!S_ISDIR(mode)) {
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFDIR;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- default:
+- /* TODO? */
+- break;
+- }
+-
+- inode->i_flags |= S_NOATIME;
+-
+- inode->i_ino = obj->objectId;
+- inode->i_mode = obj->yst_mode;
+- inode->i_uid = obj->yst_uid;
+- inode->i_gid = obj->yst_gid;
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+- inode->i_blksize = inode->i_sb->s_blocksize;
+-#endif
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-
+- inode->i_rdev = old_decode_dev(obj->yst_rdev);
+- inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+- inode->i_atime.tv_nsec = 0;
+- inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
+- inode->i_mtime.tv_nsec = 0;
+- inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
+- inode->i_ctime.tv_nsec = 0;
+-#else
+- inode->i_rdev = obj->yst_rdev;
+- inode->i_atime = obj->yst_atime;
+- inode->i_mtime = obj->yst_mtime;
+- inode->i_ctime = obj->yst_ctime;
+-#endif
+- inode->i_size = yaffs_GetObjectFileLength(obj);
+- inode->i_blocks = (inode->i_size + 511) >> 9;
+-
+- inode->i_nlink = yaffs_GetObjectLinkCount(obj);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_FillInode mode %x uid %d gid %d size %d count %d\n",
+- inode->i_mode, inode->i_uid, inode->i_gid,
+- (int)inode->i_size, atomic_read(&inode->i_count)));
+-
+- switch (obj->yst_mode & S_IFMT) {
+- default: /* fifo, device or socket */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+- init_special_inode(inode, obj->yst_mode,
+- old_decode_dev(obj->yst_rdev));
+-#else
+- init_special_inode(inode, obj->yst_mode,
+- (dev_t) (obj->yst_rdev));
+-#endif
+- break;
+- case S_IFREG: /* file */
+- inode->i_op = &yaffs_file_inode_operations;
+- inode->i_fop = &yaffs_file_operations;
+- inode->i_mapping->a_ops =
+- &yaffs_file_address_operations;
+- break;
+- case S_IFDIR: /* directory */
+- inode->i_op = &yaffs_dir_inode_operations;
+- inode->i_fop = &yaffs_dir_operations;
+- break;
+- case S_IFLNK: /* symlink */
+- inode->i_op = &yaffs_symlink_inode_operations;
+- break;
+- }
+-
+- yaffs_InodeToObjectLV(inode) = obj;
+-
+- obj->myInode = inode;
+-
+- } else {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_FileInode invalid parameters\n"));
+- }
+-
+-}
+-
+-struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+- yaffs_Object *obj)
+-{
+- struct inode *inode;
+-
+- if (!sb) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_get_inode for NULL super_block!!\n"));
+- return NULL;
+-
+- }
+-
+- if (!obj) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_get_inode for NULL object!!\n"));
+- return NULL;
+-
+- }
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_get_inode for object %d\n", obj->objectId));
+-
+- inode = Y_IGET(sb, obj->objectId);
+- if (IS_ERR(inode))
+- return NULL;
+-
+- /* NB Side effect: iget calls back to yaffs_read_inode(). */
+- /* iget also increments the inode's i_count */
+- /* NB You can't be holding grossLock or deadlock will happen! */
+-
+- return inode;
+-}
+-
+-static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+- loff_t *pos)
+-{
+- yaffs_Object *obj;
+- int nWritten, ipos;
+- struct inode *inode;
+- yaffs_Device *dev;
+-
+- obj = yaffs_DentryToObject(f->f_dentry);
+-
+- dev = obj->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- inode = f->f_dentry->d_inode;
+-
+- if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+- ipos = inode->i_size;
+- else
+- ipos = *pos;
+-
+- if (!obj)
+- T(YAFFS_TRACE_OS,
+- ("yaffs_file_write: hey obj is null!\n"));
+- else
+- T(YAFFS_TRACE_OS,
+- ("yaffs_file_write about to write writing %zu bytes"
+- "to object %d at %d\n",
+- n, obj->objectId, ipos));
+-
+- nWritten = yaffs_WriteDataToFile(obj, buf, ipos, n, 0);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_file_write writing %zu bytes, %d written at %d\n",
+- n, nWritten, ipos));
+-
+- if (nWritten > 0) {
+- ipos += nWritten;
+- *pos = ipos;
+- if (ipos > inode->i_size) {
+- inode->i_size = ipos;
+- inode->i_blocks = (ipos + 511) >> 9;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_file_write size updated to %d bytes, "
+- "%d blocks\n",
+- ipos, (int)(inode->i_blocks)));
+- }
+-
+- }
+- yaffs_GrossUnlock(dev);
+- return nWritten == 0 ? -ENOSPC : nWritten;
+-}
+-
+-/* Space holding and freeing is done to ensure we have space available for write_begin/end */
+-/* For now we just assume few parallel writes and check against a small number. */
+-/* Todo: need to do this with a counter to handle parallel reads better */
+-
+-static ssize_t yaffs_hold_space(struct file *f)
+-{
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+-
+- int nFreeChunks;
+-
+-
+- obj = yaffs_DentryToObject(f->f_dentry);
+-
+- dev = obj->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- nFreeChunks = yaffs_GetNumberOfFreeChunks(dev);
+-
+- yaffs_GrossUnlock(dev);
+-
+- return (nFreeChunks > 20) ? 1 : 0;
+-}
+-
+-static void yaffs_release_space(struct file *f)
+-{
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+-
+-
+- obj = yaffs_DentryToObject(f->f_dentry);
+-
+- dev = obj->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+-
+- yaffs_GrossUnlock(dev);
+-}
+-
+-static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+-{
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+- struct inode *inode = f->f_dentry->d_inode;
+- unsigned long offset, curoffs;
+- struct ylist_head *i;
+- yaffs_Object *l;
+-
+- char name[YAFFS_MAX_NAME_LENGTH + 1];
+-
+- obj = yaffs_DentryToObject(f->f_dentry);
+- dev = obj->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- offset = f->f_pos;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_readdir: starting at %d\n", (int)offset));
+-
+- if (offset == 0) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_readdir: entry . ino %d \n",
+- (int)inode->i_ino));
+- if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0)
+- goto out;
+- offset++;
+- f->f_pos++;
+- }
+- if (offset == 1) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_readdir: entry .. ino %d \n",
+- (int)f->f_dentry->d_parent->d_inode->i_ino));
+- if (filldir(dirent, "..", 2, offset,
+- f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0)
+- goto out;
+- offset++;
+- f->f_pos++;
+- }
+-
+- curoffs = 1;
+-
+- /* If the directory has changed since the open or last call to
+- readdir, rewind to after the 2 canned entries. */
+-
+- if (f->f_version != inode->i_version) {
+- offset = 2;
+- f->f_pos = offset;
+- f->f_version = inode->i_version;
+- }
+-
+- ylist_for_each(i, &obj->variant.directoryVariant.children) {
+- curoffs++;
+- if (curoffs >= offset) {
+- l = ylist_entry(i, yaffs_Object, siblings);
+-
+- yaffs_GetObjectName(l, name,
+- YAFFS_MAX_NAME_LENGTH + 1);
+- T(YAFFS_TRACE_OS,
+- ("yaffs_readdir: %s inode %d\n", name,
+- yaffs_GetObjectInode(l)));
+-
+- if (filldir(dirent,
+- name,
+- strlen(name),
+- offset,
+- yaffs_GetObjectInode(l),
+- yaffs_GetObjectType(l)) < 0)
+- goto up_and_out;
+-
+- offset++;
+- f->f_pos++;
+- }
+- }
+-
+-up_and_out:
+-out:
+- yaffs_GrossUnlock(dev);
+-
+- return 0;
+-}
+-
+-/*
+- * File creation. Allocate an inode, and we're done..
+- */
+-
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+-#define YCRED(x) x
+-#else
+-#define YCRED(x) (x->cred)
+-#endif
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- dev_t rdev)
+-#else
+-static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- int rdev)
+-#endif
+-{
+- struct inode *inode;
+-
+- yaffs_Object *obj = NULL;
+- yaffs_Device *dev;
+-
+- yaffs_Object *parent = yaffs_InodeToObject(dir);
+-
+- int error = -ENOSPC;
+- uid_t uid = YCRED(current)->fsuid;
+- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+-
+- if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+- mode |= S_ISGID;
+-
+- if (parent) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_mknod: parent object %d type %d\n",
+- parent->objectId, parent->variantType));
+- } else {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_mknod: could not get parent object\n"));
+- return -EPERM;
+- }
+-
+- T(YAFFS_TRACE_OS, ("yaffs_mknod: making oject for %s, "
+- "mode %x dev %x\n",
+- dentry->d_name.name, mode, rdev));
+-
+- dev = parent->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- switch (mode & S_IFMT) {
+- default:
+- /* Special (socket, fifo, device...) */
+- T(YAFFS_TRACE_OS, ("yaffs_mknod: making special\n"));
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+- obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
+- gid, old_encode_dev(rdev));
+-#else
+- obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
+- gid, rdev);
+-#endif
+- break;
+- case S_IFREG: /* file */
+- T(YAFFS_TRACE_OS, ("yaffs_mknod: making file\n"));
+- obj = yaffs_MknodFile(parent, dentry->d_name.name, mode, uid,
+- gid);
+- break;
+- case S_IFDIR: /* directory */
+- T(YAFFS_TRACE_OS,
+- ("yaffs_mknod: making directory\n"));
+- obj = yaffs_MknodDirectory(parent, dentry->d_name.name, mode,
+- uid, gid);
+- break;
+- case S_IFLNK: /* symlink */
+- T(YAFFS_TRACE_OS, ("yaffs_mknod: making symlink\n"));
+- obj = NULL; /* Do we ever get here? */
+- break;
+- }
+-
+- /* Can not call yaffs_get_inode() with gross lock held */
+- yaffs_GrossUnlock(dev);
+-
+- if (obj) {
+- inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+- d_instantiate(dentry, inode);
+- T(YAFFS_TRACE_OS,
+- ("yaffs_mknod created object %d count = %d\n",
+- obj->objectId, atomic_read(&inode->i_count)));
+- error = 0;
+- } else {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_mknod failed making object\n"));
+- error = -ENOMEM;
+- }
+-
+- return error;
+-}
+-
+-static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+-{
+- int retVal;
+- T(YAFFS_TRACE_OS, ("yaffs_mkdir\n"));
+- retVal = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+- return retVal;
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+- struct nameidata *n)
+-#else
+-static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
+-#endif
+-{
+- T(YAFFS_TRACE_OS, ("yaffs_create\n"));
+- return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+-}
+-
+-static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
+-{
+- int retVal;
+-
+- yaffs_Device *dev;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_unlink %d:%s\n", (int)(dir->i_ino),
+- dentry->d_name.name));
+-
+- dev = yaffs_InodeToObject(dir)->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- retVal = yaffs_Unlink(yaffs_InodeToObject(dir), dentry->d_name.name);
+-
+- if (retVal == YAFFS_OK) {
+- dentry->d_inode->i_nlink--;
+- dir->i_version++;
+- yaffs_GrossUnlock(dev);
+- mark_inode_dirty(dentry->d_inode);
+- return 0;
+- }
+- yaffs_GrossUnlock(dev);
+- return -ENOTEMPTY;
+-}
+-
+-/*
+- * Create a link...
+- */
+-static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+- struct dentry *dentry)
+-{
+- struct inode *inode = old_dentry->d_inode;
+- yaffs_Object *obj = NULL;
+- yaffs_Object *link = NULL;
+- yaffs_Device *dev;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_link\n"));
+-
+- obj = yaffs_InodeToObject(inode);
+- dev = obj->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
+- link = yaffs_Link(yaffs_InodeToObject(dir), dentry->d_name.name,
+- obj);
+-
+- if (link) {
+- old_dentry->d_inode->i_nlink = yaffs_GetObjectLinkCount(obj);
+- d_instantiate(dentry, old_dentry->d_inode);
+- atomic_inc(&old_dentry->d_inode->i_count);
+- T(YAFFS_TRACE_OS,
+- ("yaffs_link link count %d i_count %d\n",
+- old_dentry->d_inode->i_nlink,
+- atomic_read(&old_dentry->d_inode->i_count)));
+- }
+-
+- yaffs_GrossUnlock(dev);
+-
+- if (link)
+- return 0;
+-
+- return -EPERM;
+-}
+-
+-static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+- const char *symname)
+-{
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+- uid_t uid = YCRED(current)->fsuid;
+- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_symlink\n"));
+-
+- dev = yaffs_InodeToObject(dir)->myDev;
+- yaffs_GrossLock(dev);
+- obj = yaffs_MknodSymLink(yaffs_InodeToObject(dir), dentry->d_name.name,
+- S_IFLNK | S_IRWXUGO, uid, gid, symname);
+- yaffs_GrossUnlock(dev);
+-
+- if (obj) {
+- struct inode *inode;
+-
+- inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+- d_instantiate(dentry, inode);
+- T(YAFFS_TRACE_OS, ("symlink created OK\n"));
+- return 0;
+- } else {
+- T(YAFFS_TRACE_OS, ("symlink not created\n"));
+- }
+-
+- return -ENOMEM;
+-}
+-
+-static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+- int datasync)
+-{
+-
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+-
+- obj = yaffs_DentryToObject(dentry);
+-
+- dev = obj->myDev;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_sync_object\n"));
+- yaffs_GrossLock(dev);
+- yaffs_FlushFile(obj, 1);
+- yaffs_GrossUnlock(dev);
+- return 0;
+-}
+-
+-/*
+- * The VFS layer already does all the dentry stuff for rename.
+- *
+- * NB: POSIX says you can rename an object over an old object of the same name
+- */
+-static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+- struct inode *new_dir, struct dentry *new_dentry)
+-{
+- yaffs_Device *dev;
+- int retVal = YAFFS_FAIL;
+- yaffs_Object *target;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_rename\n"));
+- dev = yaffs_InodeToObject(old_dir)->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- /* Check if the target is an existing directory that is not empty. */
+- target = yaffs_FindObjectByName(yaffs_InodeToObject(new_dir),
+- new_dentry->d_name.name);
+-
+-
+-
+- if (target && target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
+- !ylist_empty(&target->variant.directoryVariant.children)) {
+-
+- T(YAFFS_TRACE_OS, ("target is non-empty dir\n"));
+-
+- retVal = YAFFS_FAIL;
+- } else {
+- /* Now does unlinking internally using shadowing mechanism */
+- T(YAFFS_TRACE_OS, ("calling yaffs_RenameObject\n"));
+-
+- retVal = yaffs_RenameObject(yaffs_InodeToObject(old_dir),
+- old_dentry->d_name.name,
+- yaffs_InodeToObject(new_dir),
+- new_dentry->d_name.name);
+- }
+- yaffs_GrossUnlock(dev);
+-
+- if (retVal == YAFFS_OK) {
+- if (target) {
+- new_dentry->d_inode->i_nlink--;
+- mark_inode_dirty(new_dentry->d_inode);
+- }
+-
+- return 0;
+- } else {
+- return -ENOTEMPTY;
+- }
+-}
+-
+-static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+-{
+- struct inode *inode = dentry->d_inode;
+- int error;
+- yaffs_Device *dev;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_setattr of object %d\n",
+- yaffs_InodeToObject(inode)->objectId));
+-
+- error = inode_change_ok(inode, attr);
+- if (error == 0) {
+- dev = yaffs_InodeToObject(inode)->myDev;
+- yaffs_GrossLock(dev);
+- if (yaffs_SetAttributes(yaffs_InodeToObject(inode), attr) ==
+- YAFFS_OK) {
+- error = 0;
+- } else {
+- error = -EPERM;
+- }
+- yaffs_GrossUnlock(dev);
+- if (!error)
+- error = inode_setattr(inode, attr);
+- }
+- return error;
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+-{
+- yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
+- struct super_block *sb = dentry->d_sb;
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
+-{
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+-#else
+-static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
+-{
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+-#endif
+-
+- T(YAFFS_TRACE_OS, ("yaffs_statfs\n"));
+-
+- yaffs_GrossLock(dev);
+-
+- buf->f_type = YAFFS_MAGIC;
+- buf->f_bsize = sb->s_blocksize;
+- buf->f_namelen = 255;
+-
+- if (dev->nDataBytesPerChunk & (dev->nDataBytesPerChunk - 1)) {
+- /* Do this if chunk size is not a power of 2 */
+-
+- uint64_t bytesInDev;
+- uint64_t bytesFree;
+-
+- bytesInDev = ((uint64_t)((dev->endBlock - dev->startBlock + 1))) *
+- ((uint64_t)(dev->nChunksPerBlock * dev->nDataBytesPerChunk));
+-
+- do_div(bytesInDev, sb->s_blocksize); /* bytesInDev becomes the number of blocks */
+- buf->f_blocks = bytesInDev;
+-
+- bytesFree = ((uint64_t)(yaffs_GetNumberOfFreeChunks(dev))) *
+- ((uint64_t)(dev->nDataBytesPerChunk));
+-
+- do_div(bytesFree, sb->s_blocksize);
+-
+- buf->f_bfree = bytesFree;
+-
+- } else if (sb->s_blocksize > dev->nDataBytesPerChunk) {
+-
+- buf->f_blocks =
+- (dev->endBlock - dev->startBlock + 1) *
+- dev->nChunksPerBlock /
+- (sb->s_blocksize / dev->nDataBytesPerChunk);
+- buf->f_bfree =
+- yaffs_GetNumberOfFreeChunks(dev) /
+- (sb->s_blocksize / dev->nDataBytesPerChunk);
+- } else {
+- buf->f_blocks =
+- (dev->endBlock - dev->startBlock + 1) *
+- dev->nChunksPerBlock *
+- (dev->nDataBytesPerChunk / sb->s_blocksize);
+-
+- buf->f_bfree =
+- yaffs_GetNumberOfFreeChunks(dev) *
+- (dev->nDataBytesPerChunk / sb->s_blocksize);
+- }
+-
+- buf->f_files = 0;
+- buf->f_ffree = 0;
+- buf->f_bavail = buf->f_bfree;
+-
+- yaffs_GrossUnlock(dev);
+- return 0;
+-}
+-
+-
+-static int yaffs_do_sync_fs(struct super_block *sb)
+-{
+-
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+- T(YAFFS_TRACE_OS, ("yaffs_do_sync_fs\n"));
+-
+- if (sb->s_dirt) {
+- yaffs_GrossLock(dev);
+-
+- if (dev) {
+- yaffs_FlushEntireDeviceCache(dev);
+- yaffs_CheckpointSave(dev);
+- }
+-
+- yaffs_GrossUnlock(dev);
+-
+- sb->s_dirt = 0;
+- }
+- return 0;
+-}
+-
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static void yaffs_write_super(struct super_block *sb)
+-#else
+-static int yaffs_write_super(struct super_block *sb)
+-#endif
+-{
+-
+- T(YAFFS_TRACE_OS, ("yaffs_write_super\n"));
+- if (yaffs_auto_checkpoint >= 2)
+- yaffs_do_sync_fs(sb);
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
+- return 0;
+-#endif
+-}
+-
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_sync_fs(struct super_block *sb, int wait)
+-#else
+-static int yaffs_sync_fs(struct super_block *sb)
+-#endif
+-{
+- T(YAFFS_TRACE_OS, ("yaffs_sync_fs\n"));
+-
+- if (yaffs_auto_checkpoint >= 1)
+- yaffs_do_sync_fs(sb);
+-
+- return 0;
+-}
+-
+-#ifdef YAFFS_USE_OWN_IGET
+-
+-static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
+-{
+- struct inode *inode;
+- yaffs_Object *obj;
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_iget for %lu\n", ino));
+-
+- inode = iget_locked(sb, ino);
+- if (!inode)
+- return ERR_PTR(-ENOMEM);
+- if (!(inode->i_state & I_NEW))
+- return inode;
+-
+- /* NB This is called as a side effect of other functions, but
+- * we had to release the lock to prevent deadlocks, so
+- * need to lock again.
+- */
+-
+- yaffs_GrossLock(dev);
+-
+- obj = yaffs_FindObjectByNumber(dev, inode->i_ino);
+-
+- yaffs_FillInodeFromObject(inode, obj);
+-
+- yaffs_GrossUnlock(dev);
+-
+- unlock_new_inode(inode);
+- return inode;
+-}
+-
+-#else
+-
+-static void yaffs_read_inode(struct inode *inode)
+-{
+- /* NB This is called as a side effect of other functions, but
+- * we had to release the lock to prevent deadlocks, so
+- * need to lock again.
+- */
+-
+- yaffs_Object *obj;
+- yaffs_Device *dev = yaffs_SuperToDevice(inode->i_sb);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_read_inode for %d\n", (int)inode->i_ino));
+-
+- yaffs_GrossLock(dev);
+-
+- obj = yaffs_FindObjectByNumber(dev, inode->i_ino);
+-
+- yaffs_FillInodeFromObject(inode, obj);
+-
+- yaffs_GrossUnlock(dev);
+-}
+-
+-#endif
+-
+-static YLIST_HEAD(yaffs_dev_list);
+-
+-#if 0 /* not used */
+-static int yaffs_remount_fs(struct super_block *sb, int *flags, char *data)
+-{
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+-
+- if (*flags & MS_RDONLY) {
+- struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_remount_fs: %s: RO\n", dev->name));
+-
+- yaffs_GrossLock(dev);
+-
+- yaffs_FlushEntireDeviceCache(dev);
+-
+- yaffs_CheckpointSave(dev);
+-
+- if (mtd->sync)
+- mtd->sync(mtd);
+-
+- yaffs_GrossUnlock(dev);
+- } else {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_remount_fs: %s: RW\n", dev->name));
+- }
+-
+- return 0;
+-}
+-#endif
+-
+-static void yaffs_put_super(struct super_block *sb)
+-{
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+-
+- T(YAFFS_TRACE_OS, ("yaffs_put_super\n"));
+-
+- yaffs_GrossLock(dev);
+-
+- yaffs_FlushEntireDeviceCache(dev);
+-
+- yaffs_CheckpointSave(dev);
+-
+- if (dev->putSuperFunc)
+- dev->putSuperFunc(sb);
+-
+- yaffs_Deinitialise(dev);
+-
+- yaffs_GrossUnlock(dev);
+-
+- /* we assume this is protected by lock_kernel() in mount/umount */
+- ylist_del(&dev->devList);
+-
+- if (dev->spareBuffer) {
+- YFREE(dev->spareBuffer);
+- dev->spareBuffer = NULL;
+- }
+-
+- kfree(dev);
+-}
+-
+-
+-static void yaffs_MTDPutSuper(struct super_block *sb)
+-{
+- struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
+-
+- if (mtd->sync)
+- mtd->sync(mtd);
+-
+- put_mtd_device(mtd);
+-}
+-
+-
+-static void yaffs_MarkSuperBlockDirty(void *vsb)
+-{
+- struct super_block *sb = (struct super_block *)vsb;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_MarkSuperBlockDirty() sb = %p\n", sb));
+- if (sb)
+- sb->s_dirt = 1;
+-}
+-
+-typedef struct {
+- int inband_tags;
+- int skip_checkpoint_read;
+- int skip_checkpoint_write;
+- int no_cache;
+-} yaffs_options;
+-
+-#define MAX_OPT_LEN 20
+-static int yaffs_parse_options(yaffs_options *options, const char *options_str)
+-{
+- char cur_opt[MAX_OPT_LEN + 1];
+- int p;
+- int error = 0;
+-
+- /* Parse through the options which is a comma seperated list */
+-
+- while (options_str && *options_str && !error) {
+- memset(cur_opt, 0, MAX_OPT_LEN + 1);
+- p = 0;
+-
+- while (*options_str && *options_str != ',') {
+- if (p < MAX_OPT_LEN) {
+- cur_opt[p] = *options_str;
+- p++;
+- }
+- options_str++;
+- }
+-
+- if (!strcmp(cur_opt, "inband-tags"))
+- options->inband_tags = 1;
+- else if (!strcmp(cur_opt, "no-cache"))
+- options->no_cache = 1;
+- else if (!strcmp(cur_opt, "no-checkpoint-read"))
+- options->skip_checkpoint_read = 1;
+- else if (!strcmp(cur_opt, "no-checkpoint-write"))
+- options->skip_checkpoint_write = 1;
+- else if (!strcmp(cur_opt, "no-checkpoint")) {
+- options->skip_checkpoint_read = 1;
+- options->skip_checkpoint_write = 1;
+- } else {
+- printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
+- cur_opt);
+- error = 1;
+- }
+- }
+-
+- return error;
+-}
+-
+-static struct super_block *yaffs_internal_read_super(int yaffsVersion,
+- struct super_block *sb,
+- void *data, int silent)
+-{
+- int nBlocks;
+- struct inode *inode = NULL;
+- struct dentry *root;
+- yaffs_Device *dev = 0;
+- char devname_buf[BDEVNAME_SIZE + 1];
+- struct mtd_info *mtd;
+- int err;
+- char *data_str = (char *)data;
+-
+- yaffs_options options;
+-
+- sb->s_magic = YAFFS_MAGIC;
+- sb->s_op = &yaffs_super_ops;
+- sb->s_flags |= MS_NOATIME;
+-
+- if (!sb)
+- printk(KERN_INFO "yaffs: sb is NULL\n");
+- else if (!sb->s_dev)
+- printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
+- else if (!yaffs_devname(sb, devname_buf))
+- printk(KERN_INFO "yaffs: devname is NULL\n");
+- else
+- printk(KERN_INFO "yaffs: dev is %d name is \"%s\"\n",
+- sb->s_dev,
+- yaffs_devname(sb, devname_buf));
+-
+- if (!data_str)
+- data_str = "";
+-
+- printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+-
+- memset(&options, 0, sizeof(options));
+-
+- if (yaffs_parse_options(&options, data_str)) {
+- /* Option parsing failed */
+- return NULL;
+- }
+-
+-
+- sb->s_blocksize = PAGE_CACHE_SIZE;
+- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+- T(YAFFS_TRACE_OS, ("yaffs_read_super: Using yaffs%d\n", yaffsVersion));
+- T(YAFFS_TRACE_OS,
+- ("yaffs_read_super: block size %d\n", (int)(sb->s_blocksize)));
+-
+-#ifdef CONFIG_YAFFS_DISABLE_WRITE_VERIFY
+- T(YAFFS_TRACE_OS,
+- ("yaffs: Write verification disabled. All guarantees "
+- "null and void\n"));
+-#endif
+-
+- T(YAFFS_TRACE_ALWAYS, ("yaffs: Attempting MTD mount on %u.%u, "
+- "\"%s\"\n",
+- MAJOR(sb->s_dev), MINOR(sb->s_dev),
+- yaffs_devname(sb, devname_buf)));
+-
+- /* Check it's an mtd device..... */
+- if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
+- return NULL; /* This isn't an mtd device */
+-
+- /* Get the device */
+- mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+- if (!mtd) {
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device #%u doesn't appear to exist\n",
+- MINOR(sb->s_dev)));
+- return NULL;
+- }
+- /* Check it's NAND */
+- if (mtd->type != MTD_NANDFLASH) {
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device is not NAND it's type %d\n", mtd->type));
+- return NULL;
+- }
+-
+- T(YAFFS_TRACE_OS, (" erase %p\n", mtd->erase));
+- T(YAFFS_TRACE_OS, (" read %p\n", mtd->read));
+- T(YAFFS_TRACE_OS, (" write %p\n", mtd->write));
+- T(YAFFS_TRACE_OS, (" readoob %p\n", mtd->read_oob));
+- T(YAFFS_TRACE_OS, (" writeoob %p\n", mtd->write_oob));
+- T(YAFFS_TRACE_OS, (" block_isbad %p\n", mtd->block_isbad));
+- T(YAFFS_TRACE_OS, (" block_markbad %p\n", mtd->block_markbad));
+- T(YAFFS_TRACE_OS, (" %s %d\n", WRITE_SIZE_STR, WRITE_SIZE(mtd)));
+- T(YAFFS_TRACE_OS, (" oobsize %d\n", mtd->oobsize));
+- T(YAFFS_TRACE_OS, (" erasesize %d\n", mtd->erasesize));
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+- T(YAFFS_TRACE_OS, (" size %u\n", mtd->size));
+-#else
+- T(YAFFS_TRACE_OS, (" size %lld\n", mtd->size));
+-#endif
+-
+-#ifdef CONFIG_YAFFS_AUTO_YAFFS2
+-
+- if (yaffsVersion == 1 && WRITE_SIZE(mtd) >= 2048) {
+- T(YAFFS_TRACE_ALWAYS, ("yaffs: auto selecting yaffs2\n"));
+- yaffsVersion = 2;
+- }
+-
+- /* Added NCB 26/5/2006 for completeness */
+- if (yaffsVersion == 2 && !options.inband_tags && WRITE_SIZE(mtd) == 512) {
+- T(YAFFS_TRACE_ALWAYS, ("yaffs: auto selecting yaffs1\n"));
+- yaffsVersion = 1;
+- }
+-
+-#endif
+-
+- if (yaffsVersion == 2) {
+- /* Check for version 2 style functions */
+- if (!mtd->erase ||
+- !mtd->block_isbad ||
+- !mtd->block_markbad ||
+- !mtd->read ||
+- !mtd->write ||
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+- !mtd->read_oob || !mtd->write_oob) {
+-#else
+- !mtd->write_ecc ||
+- !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
+-#endif
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device does not support required "
+- "functions\n"));;
+- return NULL;
+- }
+-
+- if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+- mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
+- !options.inband_tags) {
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device does not have the "
+- "right page sizes\n"));
+- return NULL;
+- }
+- } else {
+- /* Check for V1 style functions */
+- if (!mtd->erase ||
+- !mtd->read ||
+- !mtd->write ||
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+- !mtd->read_oob || !mtd->write_oob) {
+-#else
+- !mtd->write_ecc ||
+- !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
+-#endif
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device does not support required "
+- "functions\n"));;
+- return NULL;
+- }
+-
+- if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
+- mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device does not support have the "
+- "right page sizes\n"));
+- return NULL;
+- }
+- }
+-
+- /* OK, so if we got here, we have an MTD that's NAND and looks
+- * like it has the right capabilities
+- * Set the yaffs_Device up for mtd
+- */
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+- sb->s_fs_info = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
+-#else
+- sb->u.generic_sbp = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
+-#endif
+- if (!dev) {
+- /* Deep shit could not allocate device structure */
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs_read_super: Failed trying to allocate "
+- "yaffs_Device. \n"));
+- return NULL;
+- }
+-
+- memset(dev, 0, sizeof(yaffs_Device));
+- dev->genericDevice = mtd;
+- dev->name = mtd->name;
+-
+- /* Set up the memory size parameters.... */
+-
+- nBlocks = YCALCBLOCKS(mtd->size, (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK));
+-
+- dev->startBlock = 0;
+- dev->endBlock = nBlocks - 1;
+- dev->nChunksPerBlock = YAFFS_CHUNKS_PER_BLOCK;
+- dev->totalBytesPerChunk = YAFFS_BYTES_PER_CHUNK;
+- dev->nReservedBlocks = 5;
+- dev->nShortOpCaches = (options.no_cache) ? 0 : 10;
+- dev->inbandTags = options.inband_tags;
+-
+- /* ... and the functions. */
+- if (yaffsVersion == 2) {
+- dev->writeChunkWithTagsToNAND =
+- nandmtd2_WriteChunkWithTagsToNAND;
+- dev->readChunkWithTagsFromNAND =
+- nandmtd2_ReadChunkWithTagsFromNAND;
+- dev->markNANDBlockBad = nandmtd2_MarkNANDBlockBad;
+- dev->queryNANDBlock = nandmtd2_QueryNANDBlock;
+- dev->spareBuffer = YMALLOC(mtd->oobsize);
+- dev->isYaffs2 = 1;
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+- dev->totalBytesPerChunk = mtd->writesize;
+- dev->nChunksPerBlock = mtd->erasesize / mtd->writesize;
+-#else
+- dev->totalBytesPerChunk = mtd->oobblock;
+- dev->nChunksPerBlock = mtd->erasesize / mtd->oobblock;
+-#endif
+- nBlocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+-
+- dev->startBlock = 0;
+- dev->endBlock = nBlocks - 1;
+- } else {
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+- /* use the MTD interface in yaffs_mtdif1.c */
+- dev->writeChunkWithTagsToNAND =
+- nandmtd1_WriteChunkWithTagsToNAND;
+- dev->readChunkWithTagsFromNAND =
+- nandmtd1_ReadChunkWithTagsFromNAND;
+- dev->markNANDBlockBad = nandmtd1_MarkNANDBlockBad;
+- dev->queryNANDBlock = nandmtd1_QueryNANDBlock;
+-#else
+- dev->writeChunkToNAND = nandmtd_WriteChunkToNAND;
+- dev->readChunkFromNAND = nandmtd_ReadChunkFromNAND;
+-#endif
+- dev->isYaffs2 = 0;
+- }
+- /* ... and common functions */
+- dev->eraseBlockInNAND = nandmtd_EraseBlockInNAND;
+- dev->initialiseNAND = nandmtd_InitialiseNAND;
+-
+- dev->putSuperFunc = yaffs_MTDPutSuper;
+-
+- dev->superBlock = (void *)sb;
+- dev->markSuperBlockDirty = yaffs_MarkSuperBlockDirty;
+-
+-
+-#ifndef CONFIG_YAFFS_DOES_ECC
+- dev->useNANDECC = 1;
+-#endif
+-
+-#ifdef CONFIG_YAFFS_DISABLE_WIDE_TNODES
+- dev->wideTnodesDisabled = 1;
+-#endif
+-
+- dev->skipCheckpointRead = options.skip_checkpoint_read;
+- dev->skipCheckpointWrite = options.skip_checkpoint_write;
+-
+- /* we assume this is protected by lock_kernel() in mount/umount */
+- ylist_add_tail(&dev->devList, &yaffs_dev_list);
+-
+- init_MUTEX(&dev->grossLock);
+-
+- yaffs_GrossLock(dev);
+-
+- err = yaffs_GutsInitialise(dev);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_read_super: guts initialised %s\n",
+- (err == YAFFS_OK) ? "OK" : "FAILED"));
+-
+- /* Release lock before yaffs_get_inode() */
+- yaffs_GrossUnlock(dev);
+-
+- /* Create root inode */
+- if (err == YAFFS_OK)
+- inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0,
+- yaffs_Root(dev));
+-
+- if (!inode)
+- return NULL;
+-
+- inode->i_op = &yaffs_dir_inode_operations;
+- inode->i_fop = &yaffs_dir_operations;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_read_super: got root inode\n"));
+-
+- root = d_alloc_root(inode);
+-
+- T(YAFFS_TRACE_OS, ("yaffs_read_super: d_alloc_root done\n"));
+-
+- if (!root) {
+- iput(inode);
+- return NULL;
+- }
+- sb->s_root = root;
+- sb->s_dirt = !dev->isCheckpointed;
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs_read_super: isCheckpointed %d\n", dev->isCheckpointed));
+-
+- T(YAFFS_TRACE_OS, ("yaffs_read_super: done\n"));
+- return sb;
+-}
+-
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+- int silent)
+-{
+- return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name,
+- void *data, struct vfsmount *mnt)
+-{
+-
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs_internal_read_super_mtd, mnt);
+-}
+-#else
+-static struct super_block *yaffs_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name,
+- void *data)
+-{
+-
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs_internal_read_super_mtd);
+-}
+-#endif
+-
+-static struct file_system_type yaffs_fs_type = {
+- .owner = THIS_MODULE,
+- .name = "yaffs",
+- .get_sb = yaffs_read_super,
+- .kill_sb = kill_block_super,
+- .fs_flags = FS_REQUIRES_DEV,
+-};
+-#else
+-static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
+- int silent)
+-{
+- return yaffs_internal_read_super(1, sb, data, silent);
+-}
+-
+-static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
+- FS_REQUIRES_DEV);
+-#endif
+-
+-
+-#ifdef CONFIG_YAFFS_YAFFS2
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+- int silent)
+-{
+- return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs2_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name, void *data,
+- struct vfsmount *mnt)
+-{
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs2_internal_read_super_mtd, mnt);
+-}
+-#else
+-static struct super_block *yaffs2_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name,
+- void *data)
+-{
+-
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs2_internal_read_super_mtd);
+-}
+-#endif
+-
+-static struct file_system_type yaffs2_fs_type = {
+- .owner = THIS_MODULE,
+- .name = "yaffs2",
+- .get_sb = yaffs2_read_super,
+- .kill_sb = kill_block_super,
+- .fs_flags = FS_REQUIRES_DEV,
+-};
+-#else
+-static struct super_block *yaffs2_read_super(struct super_block *sb,
+- void *data, int silent)
+-{
+- return yaffs_internal_read_super(2, sb, data, silent);
+-}
+-
+-static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
+- FS_REQUIRES_DEV);
+-#endif
+-
+-#endif /* CONFIG_YAFFS_YAFFS2 */
+-
+-static struct proc_dir_entry *my_proc_entry;
+-
+-static char *yaffs_dump_dev(char *buf, yaffs_Device * dev)
+-{
+- buf += sprintf(buf, "startBlock......... %d\n", dev->startBlock);
+- buf += sprintf(buf, "endBlock........... %d\n", dev->endBlock);
+- buf += sprintf(buf, "totalBytesPerChunk. %d\n", dev->totalBytesPerChunk);
+- buf += sprintf(buf, "nDataBytesPerChunk. %d\n", dev->nDataBytesPerChunk);
+- buf += sprintf(buf, "chunkGroupBits..... %d\n", dev->chunkGroupBits);
+- buf += sprintf(buf, "chunkGroupSize..... %d\n", dev->chunkGroupSize);
+- buf += sprintf(buf, "nErasedBlocks...... %d\n", dev->nErasedBlocks);
+- buf += sprintf(buf, "nReservedBlocks.... %d\n", dev->nReservedBlocks);
+- buf += sprintf(buf, "blocksInCheckpoint. %d\n", dev->blocksInCheckpoint);
+- buf += sprintf(buf, "nTnodesCreated..... %d\n", dev->nTnodesCreated);
+- buf += sprintf(buf, "nFreeTnodes........ %d\n", dev->nFreeTnodes);
+- buf += sprintf(buf, "nObjectsCreated.... %d\n", dev->nObjectsCreated);
+- buf += sprintf(buf, "nFreeObjects....... %d\n", dev->nFreeObjects);
+- buf += sprintf(buf, "nFreeChunks........ %d\n", dev->nFreeChunks);
+- buf += sprintf(buf, "nPageWrites........ %d\n", dev->nPageWrites);
+- buf += sprintf(buf, "nPageReads......... %d\n", dev->nPageReads);
+- buf += sprintf(buf, "nBlockErasures..... %d\n", dev->nBlockErasures);
+- buf += sprintf(buf, "nGCCopies.......... %d\n", dev->nGCCopies);
+- buf += sprintf(buf, "garbageCollections. %d\n", dev->garbageCollections);
+- buf += sprintf(buf, "passiveGCs......... %d\n",
+- dev->passiveGarbageCollections);
+- buf += sprintf(buf, "nRetriedWrites..... %d\n", dev->nRetriedWrites);
+- buf += sprintf(buf, "nShortOpCaches..... %d\n", dev->nShortOpCaches);
+- buf += sprintf(buf, "nRetireBlocks...... %d\n", dev->nRetiredBlocks);
+- buf += sprintf(buf, "eccFixed........... %d\n", dev->eccFixed);
+- buf += sprintf(buf, "eccUnfixed......... %d\n", dev->eccUnfixed);
+- buf += sprintf(buf, "tagsEccFixed....... %d\n", dev->tagsEccFixed);
+- buf += sprintf(buf, "tagsEccUnfixed..... %d\n", dev->tagsEccUnfixed);
+- buf += sprintf(buf, "cacheHits.......... %d\n", dev->cacheHits);
+- buf += sprintf(buf, "nDeletedFiles...... %d\n", dev->nDeletedFiles);
+- buf += sprintf(buf, "nUnlinkedFiles..... %d\n", dev->nUnlinkedFiles);
+- buf +=
+- sprintf(buf, "nBackgroudDeletions %d\n", dev->nBackgroundDeletions);
+- buf += sprintf(buf, "useNANDECC......... %d\n", dev->useNANDECC);
+- buf += sprintf(buf, "isYaffs2........... %d\n", dev->isYaffs2);
+- buf += sprintf(buf, "inbandTags......... %d\n", dev->inbandTags);
+-
+- return buf;
+-}
+-
+-static int yaffs_proc_read(char *page,
+- char **start,
+- off_t offset, int count, int *eof, void *data)
+-{
+- struct ylist_head *item;
+- char *buf = page;
+- int step = offset;
+- int n = 0;
+-
+- /* Get proc_file_read() to step 'offset' by one on each sucessive call.
+- * We use 'offset' (*ppos) to indicate where we are in devList.
+- * This also assumes the user has posted a read buffer large
+- * enough to hold the complete output; but that's life in /proc.
+- */
+-
+- *(int *)start = 1;
+-
+- /* Print header first */
+- if (step == 0) {
+- buf += sprintf(buf, "YAFFS built:" __DATE__ " " __TIME__
+- "\n%s\n%s\n", yaffs_fs_c_version,
+- yaffs_guts_c_version);
+- }
+-
+- /* hold lock_kernel while traversing yaffs_dev_list */
+- lock_kernel();
+-
+- /* Locate and print the Nth entry. Order N-squared but N is small. */
+- ylist_for_each(item, &yaffs_dev_list) {
+- yaffs_Device *dev = ylist_entry(item, yaffs_Device, devList);
+- if (n < step) {
+- n++;
+- continue;
+- }
+- buf += sprintf(buf, "\nDevice %d \"%s\"\n", n, dev->name);
+- buf = yaffs_dump_dev(buf, dev);
+- break;
+- }
+- unlock_kernel();
+-
+- return buf - page < count ? buf - page : count;
+-}
+-
+-/**
+- * Set the verbosity of the warnings and error messages.
+- *
+- * Note that the names can only be a..z or _ with the current code.
+- */
+-
+-static struct {
+- char *mask_name;
+- unsigned mask_bitfield;
+-} mask_flags[] = {
+- {"allocate", YAFFS_TRACE_ALLOCATE},
+- {"always", YAFFS_TRACE_ALWAYS},
+- {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
+- {"buffers", YAFFS_TRACE_BUFFERS},
+- {"bug", YAFFS_TRACE_BUG},
+- {"checkpt", YAFFS_TRACE_CHECKPOINT},
+- {"deletion", YAFFS_TRACE_DELETION},
+- {"erase", YAFFS_TRACE_ERASE},
+- {"error", YAFFS_TRACE_ERROR},
+- {"gc_detail", YAFFS_TRACE_GC_DETAIL},
+- {"gc", YAFFS_TRACE_GC},
+- {"mtd", YAFFS_TRACE_MTD},
+- {"nandaccess", YAFFS_TRACE_NANDACCESS},
+- {"os", YAFFS_TRACE_OS},
+- {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
+- {"scan", YAFFS_TRACE_SCAN},
+- {"tracing", YAFFS_TRACE_TRACING},
+-
+- {"verify", YAFFS_TRACE_VERIFY},
+- {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
+- {"verify_full", YAFFS_TRACE_VERIFY_FULL},
+- {"verify_all", YAFFS_TRACE_VERIFY_ALL},
+-
+- {"write", YAFFS_TRACE_WRITE},
+- {"all", 0xffffffff},
+- {"none", 0},
+- {NULL, 0},
+-};
+-
+-#define MAX_MASK_NAME_LENGTH 40
+-static int yaffs_proc_write(struct file *file, const char *buf,
+- unsigned long count, void *data)
+-{
+- unsigned rg = 0, mask_bitfield;
+- char *end;
+- char *mask_name;
+- const char *x;
+- char substring[MAX_MASK_NAME_LENGTH + 1];
+- int i;
+- int done = 0;
+- int add, len = 0;
+- int pos = 0;
+-
+- rg = yaffs_traceMask;
+-
+- while (!done && (pos < count)) {
+- done = 1;
+- while ((pos < count) && isspace(buf[pos]))
+- pos++;
+-
+- switch (buf[pos]) {
+- case '+':
+- case '-':
+- case '=':
+- add = buf[pos];
+- pos++;
+- break;
+-
+- default:
+- add = ' ';
+- break;
+- }
+- mask_name = NULL;
+-
+- mask_bitfield = simple_strtoul(buf + pos, &end, 0);
+-
+- if (end > buf + pos) {
+- mask_name = "numeral";
+- len = end - (buf + pos);
+- pos += len;
+- done = 0;
+- } else {
+- for (x = buf + pos, i = 0;
+- (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
+- i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+- substring[i] = *x;
+- substring[i] = '\0';
+-
+- for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+- if (strcmp(substring, mask_flags[i].mask_name) == 0) {
+- mask_name = mask_flags[i].mask_name;
+- mask_bitfield = mask_flags[i].mask_bitfield;
+- done = 0;
+- break;
+- }
+- }
+- }
+-
+- if (mask_name != NULL) {
+- done = 0;
+- switch (add) {
+- case '-':
+- rg &= ~mask_bitfield;
+- break;
+- case '+':
+- rg |= mask_bitfield;
+- break;
+- case '=':
+- rg = mask_bitfield;
+- break;
+- default:
+- rg |= mask_bitfield;
+- break;
+- }
+- }
+- }
+-
+- yaffs_traceMask = rg | YAFFS_TRACE_ALWAYS;
+-
+- printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_traceMask);
+-
+- if (rg & YAFFS_TRACE_ALWAYS) {
+- for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+- char flag;
+- flag = ((rg & mask_flags[i].mask_bitfield) == mask_flags[i].mask_bitfield) ? '+' : '-';
+- printk(KERN_DEBUG "%c%s\n", flag, mask_flags[i].mask_name);
+- }
+- }
+-
+- return count;
+-}
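
The yaffs_proc_write handler removed above parses its input as a space-separated list of tokens, each optionally prefixed with '+', '-' or '=', and maps symbolic names (or numeric literals) onto bits of the trace mask: '+' sets bits, '-' clears them, '=' replaces the whole mask. Below is a minimal standalone sketch of that token scheme; the mask names and bit values are made up for illustration and are not the real YAFFS_TRACE_* definitions.

/* Standalone sketch of the "+name -name =name" trace-mask tokens accepted
 * by the proc write handler above. Names and bit values are illustrative. */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const struct { const char *name; unsigned bits; } flags[] = {
	{ "os",    0x00000001 },
	{ "scan",  0x00000002 },
	{ "erase", 0x00000004 },
	{ "all",   0xffffffff },
	{ "none",  0x00000000 },
	{ NULL, 0 },
};

static unsigned apply_token(unsigned mask, const char *tok)
{
	char op = ' ';
	unsigned bits;
	int i;

	if (*tok == '+' || *tok == '-' || *tok == '=')
		op = *tok++;

	bits = (unsigned)strtoul(tok, NULL, 0);		/* numeric token */
	if (!isdigit((unsigned char)*tok)) {		/* else look up a name */
		for (i = 0; flags[i].name; i++)
			if (strcmp(tok, flags[i].name) == 0)
				bits = flags[i].bits;
	}

	switch (op) {
	case '-': return mask & ~bits;
	case '=': return bits;
	default:  return mask | bits;			/* '+' or bare token */
	}
}

int main(void)
{
	unsigned mask = 0;
	char line[] = "+os +scan -erase";
	char *tok = strtok(line, " ");

	while (tok) {
		mask = apply_token(mask, tok);
		tok = strtok(NULL, " ");
	}
	printf("trace mask = 0x%08X\n", mask);		/* prints 0x00000003 */
	return 0;
}

The real handler then ORs YAFFS_TRACE_ALWAYS back into the resulting mask, so that bit cannot be cleared through this interface.
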
+-
+-/* Stuff to handle installation of file systems */
+-struct file_system_to_install {
+- struct file_system_type *fst;
+- int installed;
+-};
+-
+-static struct file_system_to_install fs_to_install[] = {
+- {&yaffs_fs_type, 0},
+- {&yaffs2_fs_type, 0},
+- {NULL, 0}
+-};
+-
+-static int __init init_yaffs_fs(void)
+-{
+- int error = 0;
+- struct file_system_to_install *fsinst;
+-
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs " __DATE__ " " __TIME__ " Installing. \n"));
+-
+- /* Install the proc_fs entry */
+- my_proc_entry = create_proc_entry("yaffs",
+- S_IRUGO | S_IFREG,
+- YPROC_ROOT);
+-
+- if (my_proc_entry) {
+- my_proc_entry->write_proc = yaffs_proc_write;
+- my_proc_entry->read_proc = yaffs_proc_read;
+- my_proc_entry->data = NULL;
+- } else
+- return -ENOMEM;
+-
+- /* Now add the file system entries */
+-
+- fsinst = fs_to_install;
+-
+- while (fsinst->fst && !error) {
+- error = register_filesystem(fsinst->fst);
+- if (!error)
+- fsinst->installed = 1;
+- fsinst++;
+- }
+-
+- /* Any errors? uninstall */
+- if (error) {
+- fsinst = fs_to_install;
+-
+- while (fsinst->fst) {
+- if (fsinst->installed) {
+- unregister_filesystem(fsinst->fst);
+- fsinst->installed = 0;
+- }
+- fsinst++;
+- }
+- }
+-
+- return error;
+-}
+-
+-static void __exit exit_yaffs_fs(void)
+-{
+-
+- struct file_system_to_install *fsinst;
+-
+- T(YAFFS_TRACE_ALWAYS, ("yaffs " __DATE__ " " __TIME__
+- " removing. \n"));
+-
+- remove_proc_entry("yaffs", YPROC_ROOT);
+-
+- fsinst = fs_to_install;
+-
+- while (fsinst->fst) {
+- if (fsinst->installed) {
+- unregister_filesystem(fsinst->fst);
+- fsinst->installed = 0;
+- }
+- fsinst++;
+- }
+-}
+-
+-module_init(init_yaffs_fs)
+-module_exit(exit_yaffs_fs)
+-
+-MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
+-MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2006");
+-MODULE_LICENSE("GPL");
+--- a/fs/yaffs2/yaffs_getblockinfo.h
++++ b/fs/yaffs2/yaffs_getblockinfo.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -17,18 +17,19 @@
+ #define __YAFFS_GETBLOCKINFO_H__
+
+ #include "yaffs_guts.h"
++#include "yaffs_trace.h"
+
+ /* Function to manipulate block info */
+-static Y_INLINE yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blk)
++static Y_INLINE yaffs_block_info_t *yaffs_get_block_info(yaffs_dev_t * dev, int blk)
+ {
+- if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>> yaffs: getBlockInfo block %d is not valid" TENDSTR),
+ blk));
+ YBUG();
+ }
+- return &dev->blockInfo[blk - dev->internalStartBlock];
++ return &dev->block_info[blk - dev->internal_start_block];
+ }
+
+ #endif
+--- a/fs/yaffs2/yaffs_guts.c
++++ b/fs/yaffs2/yaffs_guts.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -10,11 +10,8 @@
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+-
+-const char *yaffs_guts_c_version =
+- "$Id: yaffs_guts.c,v 1.82 2009-03-09 04:24:17 charles Exp $";
+-
+ #include "yportenv.h"
++#include "yaffs_trace.h"
+
+ #include "yaffsinterface.h"
+ #include "yaffs_guts.h"
+@@ -22,118 +19,109 @@ const char *yaffs_guts_c_version =
+ #include "yaffs_getblockinfo.h"
+
+ #include "yaffs_tagscompat.h"
+-#ifndef CONFIG_YAFFS_USE_OWN_SORT
+-#include "yaffs_qsort.h"
+-#endif
++
+ #include "yaffs_nand.h"
+
+-#include "yaffs_checkptrw.h"
++#include "yaffs_yaffs1.h"
++#include "yaffs_yaffs2.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_verify.h"
+
+ #include "yaffs_nand.h"
+ #include "yaffs_packedtags2.h"
+
++#include "yaffs_nameval.h"
++#include "yaffs_allocator.h"
+
+-#define YAFFS_PASSIVE_GC_CHUNKS 2
++/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
++#define YAFFS_GC_GOOD_ENOUGH 2
++#define YAFFS_GC_PASSIVE_THRESHOLD 4
+
+ #include "yaffs_ecc.h"
+
+
++
+ /* Robustification (if it ever comes about...) */
+-static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND);
+-static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND,
++static void yaffs_retire_block(yaffs_dev_t *dev, int flash_block);
++static void yaffs_handle_chunk_wr_error(yaffs_dev_t *dev, int nand_chunk,
+ int erasedOk);
+-static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++static void yaffs_handle_chunk_wr_ok(yaffs_dev_t *dev, int nand_chunk,
+ const __u8 *data,
+- const yaffs_ExtendedTags *tags);
+-static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
+- const yaffs_ExtendedTags *tags);
++ const yaffs_ext_tags *tags);
++static void yaffs_handle_chunk_update(yaffs_dev_t *dev, int nand_chunk,
++ const yaffs_ext_tags *tags);
+
+ /* Other local prototypes */
+-static int yaffs_UnlinkObject(yaffs_Object *obj);
+-static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj);
+-
+-static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList);
++static void yaffs_update_parent(yaffs_obj_t *obj);
++static int yaffs_unlink_obj(yaffs_obj_t *obj);
++static int yaffs_obj_cache_dirty(yaffs_obj_t *obj);
+
+-static int yaffs_WriteNewChunkWithTagsToNAND(yaffs_Device *dev,
++static int yaffs_write_new_chunk(yaffs_dev_t *dev,
+ const __u8 *buffer,
+- yaffs_ExtendedTags *tags,
++ yaffs_ext_tags *tags,
+ int useReserve);
+-static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode,
+- int chunkInNAND, int inScan);
+
+-static yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number,
+- yaffs_ObjectType type);
+-static void yaffs_AddObjectToDirectory(yaffs_Object *directory,
+- yaffs_Object *obj);
+-static int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name,
+- int force, int isShrink, int shadows);
+-static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj);
+-static int yaffs_CheckStructures(void);
+-static int yaffs_DeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, __u32 level,
+- int chunkOffset, int *limit);
+-static int yaffs_DoGenericObjectDeletion(yaffs_Object *in);
+-
+-static yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device *dev, int blockNo);
+
++static yaffs_obj_t *yaffs_new_obj(yaffs_dev_t *dev, int number,
++ yaffs_obj_type type);
+
+-static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND);
+
+-static int yaffs_UnlinkWorker(yaffs_Object *obj);
++static int yaffs_apply_xattrib_mod(yaffs_obj_t *obj, char *buffer, yaffs_xattr_mod *xmod);
+
+-static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId,
+- int chunkInObject);
++static void yaffs_remove_obj_from_dir(yaffs_obj_t *obj);
++static int yaffs_check_structures(void);
++static int yaffs_generic_obj_del(yaffs_obj_t *in);
++
++static int yaffs_check_chunk_erased(struct yaffs_dev_s *dev,
++ int nand_chunk);
+
+-static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve,
+- yaffs_BlockInfo **blockUsedPtr);
++static int yaffs_unlink_worker(yaffs_obj_t *obj);
+
+-static void yaffs_VerifyFreeChunks(yaffs_Device *dev);
++static int yaffs_tags_match(const yaffs_ext_tags *tags, int obj_id,
++ int chunkInObject);
+
+-static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in);
++static int yaffs_alloc_chunk(yaffs_dev_t *dev, int useReserve,
++ yaffs_block_info_t **blockUsedPtr);
+
+-static void yaffs_VerifyDirectory(yaffs_Object *directory);
+-#ifdef YAFFS_PARANOID
+-static int yaffs_CheckFileSanity(yaffs_Object *in);
+-#else
+-#define yaffs_CheckFileSanity(in)
+-#endif
++static void yaffs_check_obj_details_loaded(yaffs_obj_t *in);
++
++static void yaffs_invalidate_whole_cache(yaffs_obj_t *in);
++static void yaffs_invalidate_chunk_cache(yaffs_obj_t *object, int chunk_id);
+
+-static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in);
+-static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId);
++static int yaffs_find_chunk_in_file(yaffs_obj_t *in, int inode_chunk,
++ yaffs_ext_tags *tags);
+
+-static void yaffs_InvalidateCheckpoint(yaffs_Device *dev);
++static int yaffs_verify_chunk_written(yaffs_dev_t *dev,
++ int nand_chunk,
++ const __u8 *data,
++ yaffs_ext_tags *tags);
+
+-static int yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode,
+- yaffs_ExtendedTags *tags);
+
+-static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn,
+- unsigned pos);
+-static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev,
+- yaffs_FileStructure *fStruct,
+- __u32 chunkId);
++static void yaffs_load_name_from_oh(yaffs_dev_t *dev,YCHAR *name, const YCHAR *ohName, int bufferSize);
++static void yaffs_load_oh_from_name(yaffs_dev_t *dev,YCHAR *ohName, const YCHAR *name);
+
+
+ /* Function to calculate chunk and offset */
+
+-static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, int *chunkOut,
++static void yaffs_addr_to_chunk(yaffs_dev_t *dev, loff_t addr, int *chunkOut,
+ __u32 *offsetOut)
+ {
+ int chunk;
+ __u32 offset;
+
+- chunk = (__u32)(addr >> dev->chunkShift);
++ chunk = (__u32)(addr >> dev->chunk_shift);
+
+- if (dev->chunkDiv == 1) {
++ if (dev->chunk_div == 1) {
+ /* easy power of 2 case */
+- offset = (__u32)(addr & dev->chunkMask);
++ offset = (__u32)(addr & dev->chunk_mask);
+ } else {
+ /* Non power-of-2 case */
+
+ loff_t chunkBase;
+
+- chunk /= dev->chunkDiv;
++ chunk /= dev->chunk_div;
+
+- chunkBase = ((loff_t)chunk) * dev->nDataBytesPerChunk;
++ chunkBase = ((loff_t)chunk) * dev->data_bytes_per_chunk;
+ offset = (__u32)(addr - chunkBase);
+ }
+
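
The hunk above splits a byte address into a chunk index and an in-chunk offset: when the chunk size is a power of two (chunk_div == 1) the offset is just the low address bits, otherwise the shift is followed by a divide and the offset comes from multiplying back. The standalone sketch below reproduces that split; deriving shift/div/mask from the largest power-of-two factor of the chunk size is an assumption made here so the non-power-of-two branch works, not code taken from the patch.

/* Standalone sketch of the power-of-two vs. non-power-of-two address split
 * used by yaffs_addr_to_chunk above. The geometry derivation is assumed. */
#include <stdint.h>
#include <stdio.h>

struct chunk_geom {
	uint32_t bytes_per_chunk;
	uint32_t shift;		/* trailing zero bits of bytes_per_chunk */
	uint32_t div;		/* bytes_per_chunk >> shift (1 if power of two) */
	uint32_t mask;		/* (1 << shift) - 1 */
};

static void geom_init(struct chunk_geom *g, uint32_t bytes_per_chunk)
{
	g->bytes_per_chunk = bytes_per_chunk;
	g->shift = 0;
	while (!((bytes_per_chunk >> g->shift) & 1))
		g->shift++;
	g->mask = (1u << g->shift) - 1;
	g->div = bytes_per_chunk >> g->shift;
}

static void addr_to_chunk(const struct chunk_geom *g, uint64_t addr,
			  uint32_t *chunk, uint32_t *offset)
{
	*chunk = (uint32_t)(addr >> g->shift);
	if (g->div == 1) {
		/* easy power-of-two case: offset is the low bits */
		*offset = (uint32_t)(addr & g->mask);
	} else {
		/* non-power-of-two: finish with a divide and a multiply */
		*chunk /= g->div;
		*offset = (uint32_t)(addr - (uint64_t)*chunk * g->bytes_per_chunk);
	}
}

int main(void)
{
	struct chunk_geom g;
	uint32_t chunk, offset;

	geom_init(&g, 2048);		/* power-of-two chunk size */
	addr_to_chunk(&g, 5000, &chunk, &offset);
	printf("2048: chunk %u offset %u\n", chunk, offset);	/* 2, 904 */

	geom_init(&g, 2112);		/* a non-power-of-two chunk size */
	addr_to_chunk(&g, 5000, &chunk, &offset);
	printf("2112: chunk %u offset %u\n", chunk, offset);	/* 2, 776 */
	return 0;
}
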
+@@ -172,7 +160,7 @@ static __u32 ShiftsGE(__u32 x)
+
+ static __u32 Shifts(__u32 x)
+ {
+- int nShifts;
++ __u32 nShifts;
+
+ nShifts = 0;
+
+@@ -193,49 +181,49 @@ static __u32 Shifts(__u32 x)
+ * Temporary buffer manipulations.
+ */
+
+-static int yaffs_InitialiseTempBuffers(yaffs_Device *dev)
++static int yaffs_init_tmp_buffers(yaffs_dev_t *dev)
+ {
+ int i;
+ __u8 *buf = (__u8 *)1;
+
+- memset(dev->tempBuffer, 0, sizeof(dev->tempBuffer));
++ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+- dev->tempBuffer[i].line = 0; /* not in use */
+- dev->tempBuffer[i].buffer = buf =
+- YMALLOC_DMA(dev->totalBytesPerChunk);
++ dev->temp_buffer[i].line = 0; /* not in use */
++ dev->temp_buffer[i].buffer = buf =
++ YMALLOC_DMA(dev->param.total_bytes_per_chunk);
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+ }
+
+-__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo)
++__u8 *yaffs_get_temp_buffer(yaffs_dev_t *dev, int line_no)
+ {
+ int i, j;
+
+- dev->tempInUse++;
+- if (dev->tempInUse > dev->maxTemp)
+- dev->maxTemp = dev->tempInUse;
++ dev->temp_in_use++;
++ if (dev->temp_in_use > dev->max_temp)
++ dev->max_temp = dev->temp_in_use;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+- if (dev->tempBuffer[i].line == 0) {
+- dev->tempBuffer[i].line = lineNo;
+- if ((i + 1) > dev->maxTemp) {
+- dev->maxTemp = i + 1;
++ if (dev->temp_buffer[i].line == 0) {
++ dev->temp_buffer[i].line = line_no;
++ if ((i + 1) > dev->max_temp) {
++ dev->max_temp = i + 1;
+ for (j = 0; j <= i; j++)
+- dev->tempBuffer[j].maxLine =
+- dev->tempBuffer[j].line;
++ dev->temp_buffer[j].max_line =
++ dev->temp_buffer[j].line;
+ }
+
+- return dev->tempBuffer[i].buffer;
++ return dev->temp_buffer[i].buffer;
+ }
+ }
+
+ T(YAFFS_TRACE_BUFFERS,
+ (TSTR("Out of temp buffers at line %d, other held by lines:"),
+- lineNo));
++ line_no));
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+- T(YAFFS_TRACE_BUFFERS, (TSTR(" %d "), dev->tempBuffer[i].line));
++ T(YAFFS_TRACE_BUFFERS, (TSTR(" %d "), dev->temp_buffer[i].line));
+
+ T(YAFFS_TRACE_BUFFERS, (TSTR(" " TENDSTR)));
+
+@@ -244,21 +232,21 @@ __u8 *yaffs_GetTempBuffer(yaffs_Device *
+ * This is not good.
+ */
+
+- dev->unmanagedTempAllocations++;
+- return YMALLOC(dev->nDataBytesPerChunk);
++ dev->unmanaged_buffer_allocs++;
++ return YMALLOC(dev->data_bytes_per_chunk);
+
+ }
+
+-void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer,
+- int lineNo)
++void yaffs_release_temp_buffer(yaffs_dev_t *dev, __u8 *buffer,
++ int line_no)
+ {
+ int i;
+
+- dev->tempInUse--;
++ dev->temp_in_use--;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+- if (dev->tempBuffer[i].buffer == buffer) {
+- dev->tempBuffer[i].line = 0;
++ if (dev->temp_buffer[i].buffer == buffer) {
++ dev->temp_buffer[i].line = 0;
+ return;
+ }
+ }
+@@ -267,9 +255,9 @@ void yaffs_ReleaseTempBuffer(yaffs_Devic
+ /* assume it is an unmanaged one. */
+ T(YAFFS_TRACE_BUFFERS,
+ (TSTR("Releasing unmanaged temp buffer in line %d" TENDSTR),
+- lineNo));
++ line_no));
+ YFREE(buffer);
+- dev->unmanagedTempDeallocations++;
++ dev->unmanaged_buffer_deallocs++;
+ }
+
+ }
+@@ -277,21 +265,21 @@ void yaffs_ReleaseTempBuffer(yaffs_Devic
+ /*
+ * Determine if we have a managed buffer.
+ */
+-int yaffs_IsManagedTempBuffer(yaffs_Device *dev, const __u8 *buffer)
++int yaffs_is_managed_tmp_buffer(yaffs_dev_t *dev, const __u8 *buffer)
+ {
+ int i;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+- if (dev->tempBuffer[i].buffer == buffer)
++ if (dev->temp_buffer[i].buffer == buffer)
+ return 1;
+ }
+
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].data == buffer)
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].data == buffer)
+ return 1;
+ }
+
+- if (buffer == dev->checkpointBuffer)
++ if (buffer == dev->checkpt_buffer)
+ return 1;
+
+ T(YAFFS_TRACE_ALWAYS,
+@@ -299,6397 +287,4205 @@ int yaffs_IsManagedTempBuffer(yaffs_Devi
+ return 0;
+ }
+
+-
+-
+ /*
+- * Chunk bitmap manipulations
++ * Verification code
+ */
+
+-static Y_INLINE __u8 *yaffs_BlockBits(yaffs_Device *dev, int blk)
+-{
+- if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR),
+- blk));
+- YBUG();
+- }
+- return dev->chunkBits +
+- (dev->chunkBitmapStride * (blk - dev->internalStartBlock));
+-}
+
+-static Y_INLINE void yaffs_VerifyChunkBitId(yaffs_Device *dev, int blk, int chunk)
+-{
+- if (blk < dev->internalStartBlock || blk > dev->internalEndBlock ||
+- chunk < 0 || chunk >= dev->nChunksPerBlock) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),
+- blk, chunk));
+- YBUG();
+- }
+-}
+
+-static Y_INLINE void yaffs_ClearChunkBits(yaffs_Device *dev, int blk)
+-{
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+
+- memset(blkBits, 0, dev->chunkBitmapStride);
+-}
++/*
++ * Simple hash function. Needs to have a reasonable spread
++ */
+
+-static Y_INLINE void yaffs_ClearChunkBit(yaffs_Device *dev, int blk, int chunk)
++static Y_INLINE int yaffs_hash_fn(int n)
+ {
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+-
+- yaffs_VerifyChunkBitId(dev, blk, chunk);
+-
+- blkBits[chunk / 8] &= ~(1 << (chunk & 7));
++ n = abs(n);
++ return n % YAFFS_NOBJECT_BUCKETS;
+ }
+
+-static Y_INLINE void yaffs_SetChunkBit(yaffs_Device *dev, int blk, int chunk)
+-{
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+-
+- yaffs_VerifyChunkBitId(dev, blk, chunk);
+-
+- blkBits[chunk / 8] |= (1 << (chunk & 7));
+-}
++/*
++ * Access functions to useful fake objects.
++ * Note that root might have a presence in NAND if permissions are set.
++ */
+
+-static Y_INLINE int yaffs_CheckChunkBit(yaffs_Device *dev, int blk, int chunk)
++yaffs_obj_t *yaffs_root(yaffs_dev_t *dev)
+ {
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+- yaffs_VerifyChunkBitId(dev, blk, chunk);
+-
+- return (blkBits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
++ return dev->root_dir;
+ }
+
+-static Y_INLINE int yaffs_StillSomeChunkBits(yaffs_Device *dev, int blk)
++yaffs_obj_t *yaffs_lost_n_found(yaffs_dev_t *dev)
+ {
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+- int i;
+- for (i = 0; i < dev->chunkBitmapStride; i++) {
+- if (*blkBits)
+- return 1;
+- blkBits++;
+- }
+- return 0;
++ return dev->lost_n_found;
+ }
+
+-static int yaffs_CountChunkBits(yaffs_Device *dev, int blk)
+-{
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+- int i;
+- int n = 0;
+- for (i = 0; i < dev->chunkBitmapStride; i++) {
+- __u8 x = *blkBits;
+- while (x) {
+- if (x & 1)
+- n++;
+- x >>= 1;
+- }
+-
+- blkBits++;
+- }
+- return n;
+-}
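
The removed helpers above keep one allocation bit per chunk, chunkBitmapStride bytes per block, and count the set bits to cross-check pagesInUse; the replacement code includes yaffs_bitmap.h for this instead. A standalone sketch of that per-block bitmap scheme, with illustrative sizes:

/* One bit per chunk, STRIDE bytes per block, as in the removed helpers. */
#include <stdio.h>

#define CHUNKS_PER_BLOCK 64
#define N_BLOCKS         8
#define STRIDE           ((CHUNKS_PER_BLOCK + 7) / 8)	/* bytes per block */

static unsigned char chunk_bits[N_BLOCKS * STRIDE];

static unsigned char *block_bits(int blk)
{
	return chunk_bits + blk * STRIDE;
}

static void set_chunk_bit(int blk, int chunk)
{
	block_bits(blk)[chunk / 8] |= 1u << (chunk & 7);
}

static void clear_chunk_bit(int blk, int chunk)
{
	block_bits(blk)[chunk / 8] &= ~(1u << (chunk & 7));
}

static int check_chunk_bit(int blk, int chunk)
{
	return (block_bits(blk)[chunk / 8] >> (chunk & 7)) & 1;
}

static int count_chunk_bits(int blk)
{
	const unsigned char *bits = block_bits(blk);
	int i, n = 0;

	for (i = 0; i < STRIDE; i++) {
		unsigned char x = bits[i];

		while (x) {		/* same bit-at-a-time count as above */
			n += x & 1;
			x >>= 1;
		}
	}
	return n;
}

int main(void)
{
	set_chunk_bit(3, 0);
	set_chunk_bit(3, 17);
	clear_chunk_bit(3, 0);
	printf("bit 17 of block 3: %d, chunks in use: %d\n",
	       check_chunk_bit(3, 17), count_chunk_bits(3));	/* 1, 1 */
	return 0;
}
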
+
+ /*
+- * Verification code
++ * Erased NAND checking functions
+ */
+
+-static int yaffs_SkipVerification(yaffs_Device *dev)
+-{
+- return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+-}
+-
+-static int yaffs_SkipFullVerification(yaffs_Device *dev)
+-{
+- return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_FULL));
+-}
+-
+-static int yaffs_SkipNANDVerification(yaffs_Device *dev)
++int yaffs_check_ff(__u8 *buffer, int n_bytes)
+ {
+- return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_NAND));
++ /* Horrible, slow implementation */
++ while (n_bytes--) {
++ if (*buffer != 0xFF)
++ return 0;
++ buffer++;
++ }
++ return 1;
+ }
+
+-static const char *blockStateName[] = {
+-"Unknown",
+-"Needs scanning",
+-"Scanning",
+-"Empty",
+-"Allocating",
+-"Full",
+-"Dirty",
+-"Checkpoint",
+-"Collecting",
+-"Dead"
+-};
+-
+-static void yaffs_VerifyBlock(yaffs_Device *dev, yaffs_BlockInfo *bi, int n)
++static int yaffs_check_chunk_erased(struct yaffs_dev_s *dev,
++ int nand_chunk)
+ {
+- int actuallyUsed;
+- int inUse;
++ int retval = YAFFS_OK;
++ __u8 *data = yaffs_get_temp_buffer(dev, __LINE__);
++ yaffs_ext_tags tags;
++ int result;
+
+- if (yaffs_SkipVerification(dev))
+- return;
++ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+
+- /* Report illegal runtime states */
+- if (bi->blockState >= YAFFS_NUMBER_OF_BLOCK_STATES)
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has undefined state %d"TENDSTR), n, bi->blockState));
++ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
++ retval = YAFFS_FAIL;
+
+- switch (bi->blockState) {
+- case YAFFS_BLOCK_STATE_UNKNOWN:
+- case YAFFS_BLOCK_STATE_SCANNING:
+- case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has bad run-state %s"TENDSTR),
+- n, blockStateName[bi->blockState]));
++ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) || tags.chunk_used) {
++ T(YAFFS_TRACE_NANDACCESS,
++ (TSTR("Chunk %d not erased" TENDSTR), nand_chunk));
++ retval = YAFFS_FAIL;
+ }
+
+- /* Check pages in use and soft deletions are legal */
+-
+- actuallyUsed = bi->pagesInUse - bi->softDeletions;
+-
+- if (bi->pagesInUse < 0 || bi->pagesInUse > dev->nChunksPerBlock ||
+- bi->softDeletions < 0 || bi->softDeletions > dev->nChunksPerBlock ||
+- actuallyUsed < 0 || actuallyUsed > dev->nChunksPerBlock)
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has illegal values pagesInUsed %d softDeletions %d"TENDSTR),
+- n, bi->pagesInUse, bi->softDeletions));
+-
++ yaffs_release_temp_buffer(dev, data, __LINE__);
+
+- /* Check chunk bitmap legal */
+- inUse = yaffs_CountChunkBits(dev, n);
+- if (inUse != bi->pagesInUse)
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has inconsistent values pagesInUse %d counted chunk bits %d"TENDSTR),
+- n, bi->pagesInUse, inUse));
++ return retval;
+
+- /* Check that the sequence number is valid.
+- * Ten million is legal, but is very unlikely
+- */
+- if (dev->isYaffs2 &&
+- (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING || bi->blockState == YAFFS_BLOCK_STATE_FULL) &&
+- (bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000))
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has suspect sequence number of %d"TENDSTR),
+- n, bi->sequenceNumber));
+ }
+
+-static void yaffs_VerifyCollectedBlock(yaffs_Device *dev, yaffs_BlockInfo *bi,
+- int n)
++
++static int yaffs_verify_chunk_written(yaffs_dev_t *dev,
++ int nand_chunk,
++ const __u8 *data,
++ yaffs_ext_tags *tags)
+ {
+- yaffs_VerifyBlock(dev, bi, n);
++ int retval = YAFFS_OK;
++ yaffs_ext_tags tempTags;
++ __u8 *buffer = yaffs_get_temp_buffer(dev,__LINE__);
++ int result;
++
++ result = yaffs_rd_chunk_tags_nand(dev,nand_chunk,buffer,&tempTags);
++ if(memcmp(buffer,data,dev->data_bytes_per_chunk) ||
++ tempTags.obj_id != tags->obj_id ||
++ tempTags.chunk_id != tags->chunk_id ||
++ tempTags.n_bytes != tags->n_bytes)
++ retval = YAFFS_FAIL;
+
+- /* After collection the block should be in the erased state */
+- /* This will need to change if we do partial gc */
++ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+- if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING &&
+- bi->blockState != YAFFS_BLOCK_STATE_EMPTY) {
+- T(YAFFS_TRACE_ERROR, (TSTR("Block %d is in state %d after gc, should be erased"TENDSTR),
+- n, bi->blockState));
+- }
++ return retval;
+ }
+
+-static void yaffs_VerifyBlocks(yaffs_Device *dev)
++static int yaffs_write_new_chunk(struct yaffs_dev_s *dev,
++ const __u8 *data,
++ yaffs_ext_tags *tags,
++ int useReserve)
+ {
+- int i;
+- int nBlocksPerState[YAFFS_NUMBER_OF_BLOCK_STATES];
+- int nIllegalBlockStates = 0;
+-
+- if (yaffs_SkipVerification(dev))
+- return;
+-
+- memset(nBlocksPerState, 0, sizeof(nBlocksPerState));
+-
+- for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
+- yaffs_VerifyBlock(dev, bi, i);
+-
+- if (bi->blockState < YAFFS_NUMBER_OF_BLOCK_STATES)
+- nBlocksPerState[bi->blockState]++;
+- else
+- nIllegalBlockStates++;
+- }
++ int attempts = 0;
++ int writeOk = 0;
++ int chunk;
+
+- T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block summary"TENDSTR)));
++ yaffs2_checkpt_invalidate(dev);
+
+- T(YAFFS_TRACE_VERIFY, (TSTR("%d blocks have illegal states"TENDSTR), nIllegalBlockStates));
+- if (nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+- T(YAFFS_TRACE_VERIFY, (TSTR("Too many allocating blocks"TENDSTR)));
++ do {
++ yaffs_block_info_t *bi = 0;
++ int erasedOk = 0;
+
+- for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("%s %d blocks"TENDSTR),
+- blockStateName[i], nBlocksPerState[i]));
++ chunk = yaffs_alloc_chunk(dev, useReserve, &bi);
++ if (chunk < 0) {
++ /* no space */
++ break;
++ }
+
+- if (dev->blocksInCheckpoint != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Checkpoint block count wrong dev %d count %d"TENDSTR),
+- dev->blocksInCheckpoint, nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT]));
++ /* First check this chunk is erased, if it needs
++ * checking. The checking policy (unless forced
++ * always on) is as follows:
++ *
++ * Check the first page we try to write in a block.
++ * If the check passes then we don't need to check any
++ * more. If the check fails, we check again...
++ * If the block has been erased, we don't need to check.
++ *
++ * However, if the block has been prioritised for gc,
++ * then we think there might be something odd about
++ * this block and stop using it.
++ *
++ * Rationale: We should only ever see chunks that have
++ * not been erased if there was a partially written
++ * chunk due to power loss. This checking policy should
++ * catch that case with very few checks and thus save a
++ * lot of checks that are most likely not needed.
++ *
++ * Mods to the above
++ * If an erase check fails or the write fails we skip the
++ * rest of the block.
++ */
+
+- if (dev->nErasedBlocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Erased block count wrong dev %d count %d"TENDSTR),
+- dev->nErasedBlocks, nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY]));
++ /* let's give it a try */
++ attempts++;
+
+- if (nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Too many collecting blocks %d (max is 1)"TENDSTR),
+- nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING]));
++ if(dev->param.always_check_erased)
++ bi->skip_erased_check = 0;
+
+- T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
++ if (!bi->skip_erased_check) {
++ erasedOk = yaffs_check_chunk_erased(dev, chunk);
++ if (erasedOk != YAFFS_OK) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("**>> yaffs chunk %d was not erased"
++ TENDSTR), chunk));
+
+-}
++ /* If not erased, delete this one,
++ * skip rest of block and
++ * try another chunk */
++ yaffs_chunk_del(dev,chunk,1,__LINE__);
++ yaffs_skip_rest_of_block(dev);
++ continue;
++ }
++ }
+
+-/*
+- * Verify the object header. oh must be valid, but obj and tags may be NULL in which
+- * case those tests will not be performed.
+- */
+-static void yaffs_VerifyObjectHeader(yaffs_Object *obj, yaffs_ObjectHeader *oh, yaffs_ExtendedTags *tags, int parentCheck)
+-{
+- if (obj && yaffs_SkipVerification(obj->myDev))
+- return;
++ writeOk = yaffs_wr_chunk_tags_nand(dev, chunk,
++ data, tags);
+
+- if (!(tags && obj && oh)) {
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Verifying object header tags %x obj %x oh %x"TENDSTR),
+- (__u32)tags, (__u32)obj, (__u32)oh));
+- return;
+- }
++ if(!bi->skip_erased_check)
++ writeOk = yaffs_verify_chunk_written(dev, chunk, data, tags);
+
+- if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+- oh->type > YAFFS_OBJECT_TYPE_MAX)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR),
+- tags->objectId, oh->type));
++ if (writeOk != YAFFS_OK) {
++ /* Clean up aborted write, skip to next block and
++ * try another chunk */
++ yaffs_handle_chunk_wr_error(dev, chunk, erasedOk);
++ continue;
++ }
+
+- if (tags->objectId != obj->objectId)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch objectId %d"TENDSTR),
+- tags->objectId, obj->objectId));
++ bi->skip_erased_check = 1;
+
++ /* Copy the data into the robustification buffer */
++ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
+
+- /*
+- * Check that the object's parent ids match if parentCheck requested.
+- *
+- * Tests do not apply to the root object.
+- */
++ } while (writeOk != YAFFS_OK &&
++ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
+
+- if (parentCheck && tags->objectId > 1 && !obj->parent)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch parentId %d obj->parent is NULL"TENDSTR),
+- tags->objectId, oh->parentObjectId));
++ if (!writeOk)
++ chunk = -1;
+
+- if (parentCheck && obj->parent &&
+- oh->parentObjectId != obj->parent->objectId &&
+- (oh->parentObjectId != YAFFS_OBJECTID_UNLINKED ||
+- obj->parent->objectId != YAFFS_OBJECTID_DELETED))
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch parentId %d parentObjectId %d"TENDSTR),
+- tags->objectId, oh->parentObjectId, obj->parent->objectId));
++ if (attempts > 1) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("**>> yaffs write required %d attempts" TENDSTR),
++ attempts));
+
+- if (tags->objectId > 1 && oh->name[0] == 0) /* Null name */
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header name is NULL"TENDSTR),
+- obj->objectId));
++ dev->n_retired_writes += (attempts - 1);
++ }
+
+- if (tags->objectId > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header name is 0xFF"TENDSTR),
+- obj->objectId));
++ return chunk;
+ }
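
The comment block inside yaffs_write_new_chunk above spells out the write policy: allocate a chunk, check it is erased only while the block is in doubt, write, read back and verify, and on any failure delete the chunk, skip the rest of the block and retry, bounded by yaffs_wr_attempts. The sketch below captures that allocate/check/write/verify retry loop in isolation; the flash_ops callbacks, the stubs and MAX_WRITE_ATTEMPTS are illustrative stand-ins rather than the yaffs interface, and it checks the erased state on every attempt instead of tracking skip_erased_check per block.

/* Standalone sketch of the bounded write-and-verify retry loop above. */
#include <stdio.h>

#define MAX_WRITE_ATTEMPTS 3

struct flash_ops {
	int  (*alloc_chunk)(void);                         /* returns chunk or -1 */
	int  (*chunk_is_erased)(int chunk);                /* 1 if all 0xFF */
	int  (*write_chunk)(int chunk, const void *data);  /* 1 on success */
	int  (*verify_chunk)(int chunk, const void *data); /* read back, compare */
	void (*discard_chunk)(int chunk);                  /* give up on this chunk */
};

static int write_new_chunk(const struct flash_ops *ops, const void *data)
{
	int attempts = 0;

	while (attempts < MAX_WRITE_ATTEMPTS) {
		int chunk = ops->alloc_chunk();

		if (chunk < 0)
			return -1;              /* no space left */

		attempts++;

		/* A chunk that is not erased usually means a partial write
		 * after power loss, so discard it and try another one. */
		if (!ops->chunk_is_erased(chunk)) {
			ops->discard_chunk(chunk);
			continue;
		}

		if (ops->write_chunk(chunk, data) &&
		    ops->verify_chunk(chunk, data))
			return chunk;           /* written and verified */

		ops->discard_chunk(chunk);      /* write or verify failed */
	}
	return -1;                              /* too many failed attempts */
}

/* Trivial in-memory stubs so the sketch runs as-is. */
static int  next_chunk = 0;
static int  stub_alloc(void)                  { return next_chunk < 8 ? next_chunk++ : -1; }
static int  stub_erased(int c)                { (void)c; return 1; }
static int  stub_write(int c, const void *d)  { (void)c; (void)d; return 1; }
static int  stub_verify(int c, const void *d) { (void)c; (void)d; return 1; }
static void stub_discard(int c)               { (void)c; }

int main(void)
{
	const struct flash_ops ops = {
		stub_alloc, stub_erased, stub_write, stub_verify, stub_discard
	};
	char data[8] = "hello";

	printf("data written to chunk %d\n", write_new_chunk(&ops, data));
	return 0;
}

In the real function, needing more than one attempt is also reported and counted in n_retired_writes.
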
+
+
++
++/*
++ * Block retiring for handling a broken block.
++ */
+
+-static int yaffs_VerifyTnodeWorker(yaffs_Object *obj, yaffs_Tnode *tn,
+- __u32 level, int chunkOffset)
++static void yaffs_retire_block(yaffs_dev_t *dev, int flash_block)
+ {
+- int i;
+- yaffs_Device *dev = obj->myDev;
+- int ok = 1;
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, flash_block);
+
+- if (tn) {
+- if (level > 0) {
++ yaffs2_checkpt_invalidate(dev);
++
++ yaffs2_clear_oldest_dirty_seq(dev,bi);
+
+- for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+- if (tn->internal[i]) {
+- ok = yaffs_VerifyTnodeWorker(obj,
+- tn->internal[i],
+- level - 1,
+- (chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i);
+- }
+- }
+- } else if (level == 0) {
+- yaffs_ExtendedTags tags;
+- __u32 objectId = obj->objectId;
++ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
++ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR(
++ "yaffs: Failed to mark bad and erase block %d"
++ TENDSTR), flash_block));
++ } else {
++ yaffs_ext_tags tags;
++ int chunk_id = flash_block * dev->param.chunks_per_block;
+
+- chunkOffset <<= YAFFS_TNODES_LEVEL0_BITS;
++ __u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+- for (i = 0; i < YAFFS_NTNODES_LEVEL0; i++) {
+- __u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
++ memset(buffer, 0xff, dev->data_bytes_per_chunk);
++ yaffs_init_tags(&tags);
++ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
++ if (dev->param.write_chunk_tags_fn(dev, chunk_id -
++ dev->chunk_offset, buffer, &tags) != YAFFS_OK)
++ T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Failed to "
++ TCONT("write bad block marker to block %d")
++ TENDSTR), flash_block));
+
+- if (theChunk > 0) {
+- /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),tags.objectId,tags.chunkId,theChunk)); */
+- yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags);
+- if (tags.objectId != objectId || tags.chunkId != chunkOffset) {
+- T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
+- objectId, chunkOffset, theChunk,
+- tags.objectId, tags.chunkId));
+- }
+- }
+- chunkOffset++;
+- }
++ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+ }
+ }
+
+- return ok;
++ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
++ bi->gc_prioritise = 0;
++ bi->needs_retiring = 0;
+
++ dev->n_retired_blocks++;
+ }
+
++/*
++ * Functions for robustisizing TODO
++ *
++ */
+
+-static void yaffs_VerifyFile(yaffs_Object *obj)
++static void yaffs_handle_chunk_wr_ok(yaffs_dev_t *dev, int nand_chunk,
++ const __u8 *data,
++ const yaffs_ext_tags *tags)
+ {
+- int requiredTallness;
+- int actualTallness;
+- __u32 lastChunk;
+- __u32 x;
+- __u32 i;
+- yaffs_Device *dev;
+- yaffs_ExtendedTags tags;
+- yaffs_Tnode *tn;
+- __u32 objectId;
++ dev=dev;
++ nand_chunk=nand_chunk;
++ data=data;
++ tags=tags;
++}
+
+- if (!obj)
+- return;
++static void yaffs_handle_chunk_update(yaffs_dev_t *dev, int nand_chunk,
++ const yaffs_ext_tags *tags)
++{
++ dev=dev;
++ nand_chunk=nand_chunk;
++ tags=tags;
++}
+
+- if (yaffs_SkipVerification(obj->myDev))
+- return;
++void yaffs_handle_chunk_error(yaffs_dev_t *dev, yaffs_block_info_t *bi)
++{
++ if (!bi->gc_prioritise) {
++ bi->gc_prioritise = 1;
++ dev->has_pending_prioritised_gc = 1;
++ bi->chunk_error_strikes++;
+
+- dev = obj->myDev;
+- objectId = obj->objectId;
++ if (bi->chunk_error_strikes > 3) {
++ bi->needs_retiring = 1; /* Too many strikes, so retire this */
++ T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR)));
+
+- /* Check file size is consistent with tnode depth */
+- lastChunk = obj->variant.fileVariant.fileSize / dev->nDataBytesPerChunk + 1;
+- x = lastChunk >> YAFFS_TNODES_LEVEL0_BITS;
+- requiredTallness = 0;
+- while (x > 0) {
+- x >>= YAFFS_TNODES_INTERNAL_BITS;
+- requiredTallness++;
++ }
+ }
++}
+
+- actualTallness = obj->variant.fileVariant.topLevel;
++static void yaffs_handle_chunk_wr_error(yaffs_dev_t *dev, int nand_chunk,
++ int erasedOk)
++{
++ int flash_block = nand_chunk / dev->param.chunks_per_block;
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, flash_block);
+
+- if (requiredTallness > actualTallness)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d had tnode tallness %d, needs to be %d"TENDSTR),
+- obj->objectId, actualTallness, requiredTallness));
++ yaffs_handle_chunk_error(dev, bi);
+
++ if (erasedOk) {
++ /* Was an actual write failure, so mark the block for retirement */
++ bi->needs_retiring = 1;
++ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("**>> Block %d needs retiring" TENDSTR), flash_block));
++ }
+
+- /* Check that the chunks in the tnode tree are all correct.
+- * We do this by scanning through the tnode tree and
+- * checking the tags for every chunk match.
+- */
++ /* Delete the chunk */
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++ yaffs_skip_rest_of_block(dev);
++}
+
+- if (yaffs_SkipNANDVerification(dev))
+- return;
+
+- for (i = 1; i <= lastChunk; i++) {
+- tn = yaffs_FindLevel0Tnode(dev, &obj->variant.fileVariant, i);
++/*---------------- Name handling functions ------------*/
+
+- if (tn) {
+- __u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+- if (theChunk > 0) {
+- /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),objectId,i,theChunk)); */
+- yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags);
+- if (tags.objectId != objectId || tags.chunkId != i) {
+- T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
+- objectId, i, theChunk,
+- tags.objectId, tags.chunkId));
+- }
+- }
++static __u16 yaffs_calc_name_sum(const YCHAR *name)
++{
++ __u16 sum = 0;
++ __u16 i = 1;
++
++ const YUCHAR *bname = (const YUCHAR *) name;
++ if (bname) {
++ while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH/2))) {
++
++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
++ sum += yaffs_toupper(*bname) * i;
++#else
++ sum += (*bname) * i;
++#endif
++ i++;
++ bname++;
+ }
+ }
++ return sum;
+ }
+
+-
+-static void yaffs_VerifyHardLink(yaffs_Object *obj)
++void yaffs_set_obj_name(yaffs_obj_t *obj, const YCHAR *name)
+ {
+- if (obj && yaffs_SkipVerification(obj->myDev))
+- return;
+-
+- /* Verify sane equivalent object */
++#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
++ memset(obj->short_name, 0, sizeof(YCHAR) * (YAFFS_SHORT_NAME_LENGTH+1));
++ if (name && yaffs_strnlen(name,YAFFS_SHORT_NAME_LENGTH+1) <= YAFFS_SHORT_NAME_LENGTH)
++ yaffs_strcpy(obj->short_name, name);
++ else
++ obj->short_name[0] = _Y('\0');
++#endif
++ obj->sum = yaffs_calc_name_sum(name);
+ }
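
yaffs_calc_name_sum above weights each byte of the name by its 1-based position and lets the result wrap in 16 bits; yaffs_set_obj_name stores it in obj->sum, presumably so later directory lookups can reject most non-matching names without a full string compare. A standalone sketch follows; the length cap here is illustrative (the real code stops at YAFFS_MAX_NAME_LENGTH/2 and can fold case when CONFIG_YAFFS_CASE_INSENSITIVE is set).

/* Position-weighted 16-bit name checksum, as in yaffs_calc_name_sum above. */
#include <stdint.h>
#include <stdio.h>

#define NAME_SUM_MAX_CHARS 127	/* illustrative cap */

static uint16_t name_sum(const char *name)
{
	uint16_t sum = 0;
	uint16_t i = 1;
	const unsigned char *p = (const unsigned char *)name;

	if (!p)
		return 0;

	/* Weighting by position means anagrams such as "abc" and "cba"
	 * still produce different sums. */
	while (*p && i < NAME_SUM_MAX_CHARS) {
		sum += (uint16_t)(*p * i);
		i++;
		p++;
	}
	return sum;
}

int main(void)
{
	printf("sum(\"abc\") = %u\n", (unsigned)name_sum("abc"));	/* 97*1 + 98*2 + 99*3 = 590 */
	printf("sum(\"cba\") = %u\n", (unsigned)name_sum("cba"));	/* 99*1 + 98*2 + 97*3 = 586 */
	return 0;
}
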
+
+-static void yaffs_VerifySymlink(yaffs_Object *obj)
++void yaffs_set_obj_name_from_oh(yaffs_obj_t *obj, const yaffs_obj_header *oh)
+ {
+- if (obj && yaffs_SkipVerification(obj->myDev))
+- return;
+-
+- /* Verify symlink string */
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ YCHAR tmpName[YAFFS_MAX_NAME_LENGTH+1];
++ memset(tmpName,0,sizeof(tmpName));
++ yaffs_load_name_from_oh(obj->my_dev,tmpName,oh->name,YAFFS_MAX_NAME_LENGTH+1);
++ yaffs_set_obj_name(obj,tmpName);
++#else
++ yaffs_set_obj_name(obj,oh->name);
++#endif
+ }
+
+-static void yaffs_VerifySpecial(yaffs_Object *obj)
+-{
+- if (obj && yaffs_SkipVerification(obj->myDev))
+- return;
+-}
+-
+-static void yaffs_VerifyObject(yaffs_Object *obj)
+-{
+- yaffs_Device *dev;
+-
+- __u32 chunkMin;
+- __u32 chunkMax;
+-
+- __u32 chunkIdOk;
+- __u32 chunkInRange;
+- __u32 chunkShouldNotBeDeleted;
+- __u32 chunkValid;
+-
+- if (!obj)
+- return;
+-
+- if (obj->beingCreated)
+- return;
++/*-------------------- TNODES -------------------
+
+- dev = obj->myDev;
++ * List of spare tnodes
++ * The list is hooked together using the first pointer
++ * in the tnode.
++ */
+
+- if (yaffs_SkipVerification(dev))
+- return;
+
+- /* Check sane object header chunk */
++yaffs_tnode_t *yaffs_get_tnode(yaffs_dev_t *dev)
++{
++ yaffs_tnode_t *tn = yaffs_alloc_raw_tnode(dev);
++ if (tn){
++ memset(tn, 0, dev->tnode_size);
++ dev->n_tnodes++;
++ }
+
+- chunkMin = dev->internalStartBlock * dev->nChunksPerBlock;
+- chunkMax = (dev->internalEndBlock+1) * dev->nChunksPerBlock - 1;
++ dev->checkpoint_blocks_required = 0; /* force recalculation*/
+
+- chunkInRange = (((unsigned)(obj->hdrChunk)) >= chunkMin && ((unsigned)(obj->hdrChunk)) <= chunkMax);
+- chunkIdOk = chunkInRange || obj->hdrChunk == 0;
+- chunkValid = chunkInRange &&
+- yaffs_CheckChunkBit(dev,
+- obj->hdrChunk / dev->nChunksPerBlock,
+- obj->hdrChunk % dev->nChunksPerBlock);
+- chunkShouldNotBeDeleted = chunkInRange && !chunkValid;
++ return tn;
++}
+
+- if (!obj->fake &&
+- (!chunkIdOk || chunkShouldNotBeDeleted)) {
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d has chunkId %d %s %s"TENDSTR),
+- obj->objectId, obj->hdrChunk,
+- chunkIdOk ? "" : ",out of range",
+- chunkShouldNotBeDeleted ? ",marked as deleted" : ""));
+- }
++/* FreeTnode frees up a tnode and puts it back on the free list */
++static void yaffs_free_tnode(yaffs_dev_t *dev, yaffs_tnode_t *tn)
++{
++ yaffs_free_raw_tnode(dev,tn);
++ dev->n_tnodes--;
++ dev->checkpoint_blocks_required = 0; /* force recalculation*/
++}
+
+- if (chunkValid && !yaffs_SkipNANDVerification(dev)) {
+- yaffs_ExtendedTags tags;
+- yaffs_ObjectHeader *oh;
+- __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
++static void yaffs_deinit_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ yaffs_deinit_raw_tnodes_and_objs(dev);
++ dev->n_obj = 0;
++ dev->n_tnodes = 0;
++}
+
+- oh = (yaffs_ObjectHeader *)buffer;
+
+- yaffs_ReadChunkWithTagsFromNAND(dev, obj->hdrChunk, buffer,
+- &tags);
++void yaffs_load_tnode_0(yaffs_dev_t *dev, yaffs_tnode_t *tn, unsigned pos,
++ unsigned val)
++{
++ __u32 *map = (__u32 *)tn;
++ __u32 bitInMap;
++ __u32 bitInWord;
++ __u32 wordInMap;
++ __u32 mask;
+
+- yaffs_VerifyObjectHeader(obj, oh, &tags, 1);
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
++ val >>= dev->chunk_grp_bits;
+
+- yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+- }
++ bitInMap = pos * dev->tnode_width;
++ wordInMap = bitInMap / 32;
++ bitInWord = bitInMap & (32 - 1);
+
+- /* Verify it has a parent */
+- if (obj && !obj->fake &&
+- (!obj->parent || obj->parent->myDev != dev)) {
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR),
+- obj->objectId, obj->parent));
+- }
++ mask = dev->tnode_mask << bitInWord;
+
+- /* Verify parent is a directory */
+- if (obj->parent && obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR),
+- obj->objectId, obj->parent->variantType));
+- }
++ map[wordInMap] &= ~mask;
++ map[wordInMap] |= (mask & (val << bitInWord));
+
+- switch (obj->variantType) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- yaffs_VerifyFile(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- yaffs_VerifySymlink(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- yaffs_VerifyDirectory(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- yaffs_VerifyHardLink(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- yaffs_VerifySpecial(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- default:
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d has illegaltype %d"TENDSTR),
+- obj->objectId, obj->variantType));
+- break;
++ if (dev->tnode_width > (32 - bitInWord)) {
++ bitInWord = (32 - bitInWord);
++ wordInMap++;
++ mask = dev->tnode_mask >> (/*dev->tnode_width -*/ bitInWord);
++ map[wordInMap] &= ~mask;
++ map[wordInMap] |= (mask & (val >> bitInWord));
+ }
+ }
+
+-static void yaffs_VerifyObjects(yaffs_Device *dev)
++__u32 yaffs_get_group_base(yaffs_dev_t *dev, yaffs_tnode_t *tn,
++ unsigned pos)
+ {
+- yaffs_Object *obj;
+- int i;
+- struct ylist_head *lh;
++ __u32 *map = (__u32 *)tn;
++ __u32 bitInMap;
++ __u32 bitInWord;
++ __u32 wordInMap;
++ __u32 val;
+
+- if (yaffs_SkipVerification(dev))
+- return;
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+- /* Iterate through the objects in each hash entry */
++ bitInMap = pos * dev->tnode_width;
++ wordInMap = bitInMap / 32;
++ bitInWord = bitInMap & (32 - 1);
+
+- for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+- ylist_for_each(lh, &dev->objectBucket[i].list) {
+- if (lh) {
+- obj = ylist_entry(lh, yaffs_Object, hashLink);
+- yaffs_VerifyObject(obj);
+- }
+- }
+- }
+-}
++ val = map[wordInMap] >> bitInWord;
+
++ if (dev->tnode_width > (32 - bitInWord)) {
++ bitInWord = (32 - bitInWord);
++ wordInMap++;
++ val |= (map[wordInMap] << bitInWord);
++ }
+
+-/*
+- * Simple hash function. Needs to have a reasonable spread
+- */
++ val &= dev->tnode_mask;
++ val <<= dev->chunk_grp_bits;
+
+-static Y_INLINE int yaffs_HashFunction(int n)
+-{
+- n = abs(n);
+- return n % YAFFS_NOBJECT_BUCKETS;
++ return val;
+ }
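
yaffs_load_tnode_0 and yaffs_get_group_base above pack tnode_width-bit group-base values back to back into an array of 32-bit words, including the case where a value straddles a word boundary. The sketch below shows the same packing with a fixed, illustrative width; the straddle branch only fires when the width exceeds the bits left in the current word, which also keeps every shift amount below 32.

/* Standalone sketch of the packed bit-field scheme used by the two tnode
 * accessors above. WIDTH and the array sizes are illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WIDTH     18			/* bits per packed value (< 32) */
#define N_VALUES  16
#define N_WORDS   ((N_VALUES * WIDTH + 31) / 32)

static void put_packed(uint32_t *map, unsigned pos, uint32_t val)
{
	unsigned bit  = pos * WIDTH;
	unsigned word = bit / 32;
	unsigned off  = bit & 31;
	uint32_t mask = ((1u << WIDTH) - 1) << off;

	map[word] = (map[word] & ~mask) | ((val << off) & mask);

	if (WIDTH > 32 - off) {			/* value spills into next word */
		unsigned done = 32 - off;

		mask = ((1u << WIDTH) - 1) >> done;
		map[word + 1] = (map[word + 1] & ~mask) | ((val >> done) & mask);
	}
}

static uint32_t get_packed(const uint32_t *map, unsigned pos)
{
	unsigned bit  = pos * WIDTH;
	unsigned word = bit / 32;
	unsigned off  = bit & 31;
	uint32_t val  = map[word] >> off;

	if (WIDTH > 32 - off)			/* pull in the spilled bits */
		val |= map[word + 1] << (32 - off);

	return val & ((1u << WIDTH) - 1);
}

int main(void)
{
	uint32_t map[N_WORDS];
	unsigned i;

	memset(map, 0, sizeof(map));
	for (i = 0; i < N_VALUES; i++)
		put_packed(map, i, (0x3FFFF - i) & ((1u << WIDTH) - 1));

	for (i = 0; i < N_VALUES; i++)
		if (get_packed(map, i) != ((0x3FFFF - i) & ((1u << WIDTH) - 1)))
			printf("mismatch at %u\n", i);

	printf("round-trip OK\n");
	return 0;
}
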
+
+-/*
+- * Access functions to useful fake objects.
+- * Note that root might have a presence in NAND if permissions are set.
++/* ------------------- End of individual tnode manipulation -----------------*/
++
++/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
++ * The look up tree is represented by the top tnode and the number of top_level
++ * in the tree. 0 means only the level 0 tnode is in the tree.
+ */
+
+-yaffs_Object *yaffs_Root(yaffs_Device *dev)
++/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
++yaffs_tnode_t *yaffs_find_tnode_0(yaffs_dev_t *dev,
++ yaffs_file_s *file_struct,
++ __u32 chunk_id)
+ {
+- return dev->rootDir;
+-}
++ yaffs_tnode_t *tn = file_struct->top;
++ __u32 i;
++ int requiredTallness;
++ int level = file_struct->top_level;
+
+-yaffs_Object *yaffs_LostNFound(yaffs_Device *dev)
+-{
+- return dev->lostNFoundDir;
+-}
++ dev=dev;
+
++ /* Check sane level and chunk Id */
++ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
++ return NULL;
+
+-/*
+- * Erased NAND checking functions
+- */
++ if (chunk_id > YAFFS_MAX_CHUNK_ID)
++ return NULL;
+
+-int yaffs_CheckFF(__u8 *buffer, int nBytes)
+-{
+- /* Horrible, slow implementation */
+- while (nBytes--) {
+- if (*buffer != 0xFF)
+- return 0;
+- buffer++;
++ /* First check we're tall enough (ie enough top_level) */
++
++ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
++ requiredTallness = 0;
++ while (i) {
++ i >>= YAFFS_TNODES_INTERNAL_BITS;
++ requiredTallness++;
+ }
+- return 1;
+-}
+
+-static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND)
+-{
+- int retval = YAFFS_OK;
+- __u8 *data = yaffs_GetTempBuffer(dev, __LINE__);
+- yaffs_ExtendedTags tags;
+- int result;
++ if (requiredTallness > file_struct->top_level)
++ return NULL; /* Not tall enough, so we can't find it */
++
++ /* Traverse down to level 0 */
++ while (level > 0 && tn) {
++ tn = tn->internal[(chunk_id >>
++ (YAFFS_TNODES_LEVEL0_BITS +
++ (level - 1) *
++ YAFFS_TNODES_INTERNAL_BITS)) &
++ YAFFS_TNODES_INTERNAL_MASK];
++ level--;
++ }
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, chunkInNAND, data, &tags);
++ return tn;
++}
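
yaffs_find_tnode_0 above first works out how many tnode levels a chunk id needs (one level for every YAFFS_TNODES_INTERNAL_BITS digit above the level-0 index) and then peels one digit per level while walking down through internal[]. Below is a standalone sketch of just that radix arithmetic; the 4-bit level-0 and 3-bit internal widths match the usual yaffs defaults but are assumptions here, and no actual tree is built.

/* Radix decomposition of a chunk id, as performed by yaffs_find_tnode_0. */
#include <stdio.h>

#define LEVEL0_BITS    4	/* assumed YAFFS_TNODES_LEVEL0_BITS */
#define INTERNAL_BITS  3	/* assumed YAFFS_TNODES_INTERNAL_BITS */
#define INTERNAL_MASK  ((1u << INTERNAL_BITS) - 1)
#define LEVEL0_MASK    ((1u << LEVEL0_BITS) - 1)

static int required_depth(unsigned chunk_id)
{
	unsigned i = chunk_id >> LEVEL0_BITS;
	int depth = 0;

	while (i) {			/* one extra level per INTERNAL_BITS digit */
		i >>= INTERNAL_BITS;
		depth++;
	}
	return depth;
}

int main(void)
{
	unsigned chunk_id = 0x1234;
	int level = required_depth(chunk_id);

	printf("chunk 0x%X needs top_level %d\n", chunk_id, level);

	/* Same slot selection as the traversal loop above */
	while (level > 0) {
		unsigned slot = (chunk_id >>
				 (LEVEL0_BITS + (level - 1) * INTERNAL_BITS)) &
				INTERNAL_MASK;

		printf("  level %d -> internal[%u]\n", level, slot);
		level--;
	}
	printf("  level 0 -> entry %u\n", chunk_id & LEVEL0_MASK);
	return 0;
}
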
+
+- if (tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR)
+- retval = YAFFS_FAIL;
++/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
++ * This happens in two steps:
++ * 1. If the tree isn't tall enough, then make it taller.
++ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
++ *
++ * Used when modifying the tree.
++ *
++ * If the tn argument is NULL, then a fresh tnode will be added otherwise the specified tn will
++ * be plugged into the ttree.
++ */
+
+- if (!yaffs_CheckFF(data, dev->nDataBytesPerChunk) || tags.chunkUsed) {
+- T(YAFFS_TRACE_NANDACCESS,
+- (TSTR("Chunk %d not erased" TENDSTR), chunkInNAND));
+- retval = YAFFS_FAIL;
+- }
++yaffs_tnode_t *yaffs_add_find_tnode_0(yaffs_dev_t *dev,
++ yaffs_file_s *file_struct,
++ __u32 chunk_id,
++ yaffs_tnode_t *passed_tn)
++{
++ int requiredTallness;
++ int i;
++ int l;
++ yaffs_tnode_t *tn;
+
+- yaffs_ReleaseTempBuffer(dev, data, __LINE__);
++ __u32 x;
+
+- return retval;
+
+-}
++ /* Check sane level and page Id */
++ if (file_struct->top_level < 0 || file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
++ return NULL;
+
+-static int yaffs_WriteNewChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
+- const __u8 *data,
+- yaffs_ExtendedTags *tags,
+- int useReserve)
+-{
+- int attempts = 0;
+- int writeOk = 0;
+- int chunk;
++ if (chunk_id > YAFFS_MAX_CHUNK_ID)
++ return NULL;
+
+- yaffs_InvalidateCheckpoint(dev);
++ /* First check we're tall enough (ie enough top_level) */
+
+- do {
+- yaffs_BlockInfo *bi = 0;
+- int erasedOk = 0;
++ x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
++ requiredTallness = 0;
++ while (x) {
++ x >>= YAFFS_TNODES_INTERNAL_BITS;
++ requiredTallness++;
++ }
+
+- chunk = yaffs_AllocateChunk(dev, useReserve, &bi);
+- if (chunk < 0) {
+- /* no space */
+- break;
+- }
+
+- /* First check this chunk is erased, if it needs
+- * checking. The checking policy (unless forced
+- * always on) is as follows:
+- *
+- * Check the first page we try to write in a block.
+- * If the check passes then we don't need to check any
+- * more. If the check fails, we check again...
+- * If the block has been erased, we don't need to check.
+- *
+- * However, if the block has been prioritised for gc,
+- * then we think there might be something odd about
+- * this block and stop using it.
+- *
+- * Rationale: We should only ever see chunks that have
+- * not been erased if there was a partially written
+- * chunk due to power loss. This checking policy should
+- * catch that case with very few checks and thus save a
+- * lot of checks that are most likely not needed.
+- */
+- if (bi->gcPrioritise) {
+- yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
+- /* try another chunk */
+- continue;
+- }
++ if (requiredTallness > file_struct->top_level) {
++ /* Not tall enough, gotta make the tree taller */
++ for (i = file_struct->top_level; i < requiredTallness; i++) {
+
+- /* let's give it a try */
+- attempts++;
++ tn = yaffs_get_tnode(dev);
+
+-#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+- bi->skipErasedCheck = 0;
+-#endif
+- if (!bi->skipErasedCheck) {
+- erasedOk = yaffs_CheckChunkErased(dev, chunk);
+- if (erasedOk != YAFFS_OK) {
++ if (tn) {
++ tn->internal[0] = file_struct->top;
++ file_struct->top = tn;
++ file_struct->top_level++;
++ } else {
+ T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs chunk %d was not erased"
+- TENDSTR), chunk));
+-
+- /* try another chunk */
+- continue;
++ (TSTR("yaffs: no more tnodes" TENDSTR)));
++ return NULL;
+ }
+- bi->skipErasedCheck = 1;
+ }
++ }
+
+- writeOk = yaffs_WriteChunkWithTagsToNAND(dev, chunk,
+- data, tags);
+- if (writeOk != YAFFS_OK) {
+- yaffs_HandleWriteChunkError(dev, chunk, erasedOk);
+- /* try another chunk */
+- continue;
+- }
++ /* Traverse down to level 0, adding anything we need */
+
+- /* Copy the data into the robustification buffer */
+- yaffs_HandleWriteChunkOk(dev, chunk, data, tags);
++ l = file_struct->top_level;
++ tn = file_struct->top;
+
+- } while (writeOk != YAFFS_OK &&
+- (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
++ if (l > 0) {
++ while (l > 0 && tn) {
++ x = (chunk_id >>
++ (YAFFS_TNODES_LEVEL0_BITS +
++ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
++ YAFFS_TNODES_INTERNAL_MASK;
+
+- if (!writeOk)
+- chunk = -1;
+
+- if (attempts > 1) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs write required %d attempts" TENDSTR),
+- attempts));
++ if ((l > 1) && !tn->internal[x]) {
++ /* Add missing non-level-zero tnode */
++ tn->internal[x] = yaffs_get_tnode(dev);
++ if(!tn->internal[x])
++ return NULL;
++ } else if (l == 1) {
++ /* Looking from level 1 at level 0 */
++ if (passed_tn) {
++ /* If we already have one, then release it.*/
++ if (tn->internal[x])
++ yaffs_free_tnode(dev, tn->internal[x]);
++ tn->internal[x] = passed_tn;
++
++ } else if (!tn->internal[x]) {
++ /* Don't have one, none passed in */
++ tn->internal[x] = yaffs_get_tnode(dev);
++ if(!tn->internal[x])
++ return NULL;
++ }
++ }
+
+- dev->nRetriedWrites += (attempts - 1);
++ tn = tn->internal[x];
++ l--;
++ }
++ } else {
++ /* top is level 0 */
++ if (passed_tn) {
++ memcpy(tn, passed_tn, (dev->tnode_width * YAFFS_NTNODES_LEVEL0)/8);
++ yaffs_free_tnode(dev, passed_tn);
++ }
+ }
+
+- return chunk;
++ return tn;
+ }
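
The descent in yaffs_add_find_tnode_0() (and in yaffs_find_tnode_0() above) slices chunk_id into one index per level: the low YAFFS_TNODES_LEVEL0_BITS select the slot in the level-0 tnode, and each group of YAFFS_TNODES_INTERNAL_BITS above that selects the slot one level further up. A standalone sketch of that decomposition, again with assumed EX_* example widths rather than the real constants:

#include <stdio.h>

#define EX_LEVEL0_BITS    4                              /* assumed */
#define EX_INTERNAL_BITS  3                              /* assumed */
#define EX_LEVEL0_MASK    ((1u << EX_LEVEL0_BITS) - 1)
#define EX_INTERNAL_MASK  ((1u << EX_INTERNAL_BITS) - 1)

/* Print the slot chosen at each level when walking from top_level down
 * to level 0, using the same shift/mask pattern as the descent loops. */
static void show_path(unsigned chunk_id, int top_level)
{
	int l;

	printf("chunk_id %u:", chunk_id);
	for (l = top_level; l > 0; l--)
		printf("  L%d slot %u", l,
		       (chunk_id >> (EX_LEVEL0_BITS +
				     (l - 1) * EX_INTERNAL_BITS)) &
		       EX_INTERNAL_MASK);
	printf("  L0 slot %u\n", chunk_id & EX_LEVEL0_MASK);
}

int main(void)
{
	show_path(0x1234, 3);
	show_path(37, 1);
	return 0;
}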
+
+-/*
+- * Block retiring for handling a broken block.
+- */
+-
+-static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND)
++static int yaffs_find_chunk_in_group(yaffs_dev_t *dev, int theChunk,
++ yaffs_ext_tags *tags, int obj_id,
++ int inode_chunk)
+ {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
++ int j;
+
+- yaffs_InvalidateCheckpoint(dev);
++ for (j = 0; theChunk && j < dev->chunk_grp_size; j++) {
++ if (yaffs_check_chunk_bit(dev, theChunk / dev->param.chunks_per_block,
++ theChunk % dev->param.chunks_per_block)) {
++
++ if(dev->chunk_grp_size == 1)
++ return theChunk;
++ else {
++ yaffs_rd_chunk_tags_nand(dev, theChunk, NULL,
++ tags);
++ if (yaffs_tags_match(tags, obj_id, inode_chunk)) {
++ /* found it; */
++ return theChunk;
++ }
++ }
++ }
++ theChunk++;
++ }
++ return -1;
++}
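
Because a level-0 entry may record only a group base rather than an exact chunk number (the group size is dev->chunk_grp_size in the code above), yaffs_find_chunk_in_group() has to scan the whole group and use the block bitmap plus the stored tags to identify the real chunk. Below is a toy standalone version of that scan, with the bitmap and tag lookups replaced by small arrays; every name in it is invented for the example.

#include <stdio.h>

/* Invented stand-ins for the device state the real function consults:
 * chunk_in_use[] plays the role of the block bitmap, chunk_obj[] the
 * role of the object id recorded in the chunk's tags. */
static const int chunk_in_use[8] = { 0, 0, 1, 1, 0, 1, 0, 0 };
static const int chunk_obj[8]    = { 0, 0, 7, 9, 0, 7, 0, 0 };

static int find_in_group(int group_base, int group_size, int obj_id)
{
	int c = group_base;
	int j;

	for (j = 0; c && j < group_size; j++, c++) {
		if (!chunk_in_use[c])
			continue;	/* bitmap says free: skip           */
		if (group_size == 1 || chunk_obj[c] == obj_id)
			return c;	/* single-chunk group, or tags match */
	}
	return -1;
}

int main(void)
{
	/* Group of 4 chunks starting at 2; we want the one tagged for object 9. */
	printf("found chunk %d\n", find_in_group(2, 4, 9));
	return 0;
}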
+
+- if (yaffs_MarkBlockBad(dev, blockInNAND) != YAFFS_OK) {
+- if (yaffs_EraseBlockInNAND(dev, blockInNAND) != YAFFS_OK) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR(
+- "yaffs: Failed to mark bad and erase block %d"
+- TENDSTR), blockInNAND));
+- } else {
+- yaffs_ExtendedTags tags;
+- int chunkId = blockInNAND * dev->nChunksPerBlock;
++#if 0
++/* Experimental code not being used yet. Might speed up file deletion */
++/* DeleteWorker scans backwards through the tnode tree and deletes all the
++ * chunks and tnodes in the file.
++ * Returns 1 if the tree was deleted.
++ * Returns 0 if it stopped early due to hitting the limit and the delete is incomplete.
++ */
+
+- __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
++static int yaffs_del_worker(yaffs_obj_t *in, yaffs_tnode_t *tn, __u32 level,
++ int chunk_offset, int *limit)
++{
++ int i;
++ int inode_chunk;
++ int theChunk;
++ yaffs_ext_tags tags;
++ int foundChunk;
++ yaffs_dev_t *dev = in->my_dev;
+
+- memset(buffer, 0xff, dev->nDataBytesPerChunk);
+- yaffs_InitialiseTags(&tags);
+- tags.sequenceNumber = YAFFS_SEQUENCE_BAD_BLOCK;
+- if (dev->writeChunkWithTagsToNAND(dev, chunkId -
+- dev->chunkOffset, buffer, &tags) != YAFFS_OK)
+- T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Failed to "
+- TCONT("write bad block marker to block %d")
+- TENDSTR), blockInNAND));
++ int allDone = 1;
+
+- yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+- }
+- }
++ if (tn) {
++ if (level > 0) {
++ for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
++ i--) {
++ if (tn->internal[i]) {
++ if (limit && (*limit) < 0) {
++ allDone = 0;
++ } else {
++ allDone =
++ yaffs_del_worker(in,
++ tn->
++ internal
++ [i],
++ level -
++ 1,
++ (chunk_offset
++ <<
++ YAFFS_TNODES_INTERNAL_BITS)
++ + i,
++ limit);
++ }
++ if (allDone) {
++ yaffs_free_tnode(dev,
++ tn->
++ internal[i]);
++ tn->internal[i] = NULL;
++ }
++ }
++ }
++ return (allDone) ? 1 : 0;
++ } else if (level == 0) {
++ int hitLimit = 0;
+
+- bi->blockState = YAFFS_BLOCK_STATE_DEAD;
+- bi->gcPrioritise = 0;
+- bi->needsRetiring = 0;
++ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0 && !hitLimit;
++ i--) {
++ theChunk = yaffs_get_group_base(dev, tn, i);
++ if (theChunk) {
+
+- dev->nRetiredBlocks++;
+-}
++ inode_chunk = (chunk_offset <<
++ YAFFS_TNODES_LEVEL0_BITS) + i;
+
+-/*
+- * Functions for robustisizing TODO
+- *
+- */
++ foundChunk =
++ yaffs_find_chunk_in_group(dev,
++ theChunk,
++ &tags,
++ in->obj_id,
++ inode_chunk);
+
+-static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
+- const __u8 *data,
+- const yaffs_ExtendedTags *tags)
+-{
+-}
++ if (foundChunk > 0) {
++ yaffs_chunk_del(dev,
++ foundChunk, 1,
++ __LINE__);
++ in->n_data_chunks--;
++ if (limit) {
++ *limit = *limit - 1;
++ if (*limit <= 0)
++ hitLimit = 1;
++ }
+
+-static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
+- const yaffs_ExtendedTags *tags)
+-{
+-}
++ }
+
+-void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi)
+-{
+- if (!bi->gcPrioritise) {
+- bi->gcPrioritise = 1;
+- dev->hasPendingPrioritisedGCs = 1;
+- bi->chunkErrorStrikes++;
++ yaffs_load_tnode_0(dev, tn, i, 0);
++ }
+
+- if (bi->chunkErrorStrikes > 3) {
+-			bi->needsRetiring = 1; /* Too many strikes, so retire this */
+- T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR)));
++ }
++ return (i < 0) ? 1 : 0;
+
+ }
++
+ }
++
++ return 1;
++
+ }
+
+-static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND,
+- int erasedOk)
++#endif
++
++static void yaffs_soft_del_chunk(yaffs_dev_t *dev, int chunk)
+ {
+- int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
++ yaffs_block_info_t *theBlock;
++ unsigned block_no;
+
+- yaffs_HandleChunkError(dev, bi);
++ T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk));
+
+- if (erasedOk) {
+- /* Was an actual write failure, so mark the block for retirement */
+- bi->needsRetiring = 1;
+- T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("**>> Block %d needs retiring" TENDSTR), blockInNAND));
++ block_no = chunk / dev->param.chunks_per_block;
++ theBlock = yaffs_get_block_info(dev, block_no);
++ if (theBlock) {
++ theBlock->soft_del_pages++;
++ dev->n_free_chunks++;
++ yaffs2_update_oldest_dirty_seq(dev, block_no, theBlock);
+ }
+-
+- /* Delete the chunk */
+- yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
+ }
+
++/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file.
++ * All soft deleting does is increment the block's softdelete count and pulls the chunk out
++ * of the tnode.
++ * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
++ */
+
+-/*---------------- Name handling functions ------------*/
+-
+-static __u16 yaffs_CalcNameSum(const YCHAR *name)
++static int yaffs_soft_del_worker(yaffs_obj_t *in, yaffs_tnode_t *tn,
++ __u32 level, int chunk_offset)
+ {
+- __u16 sum = 0;
+- __u16 i = 1;
++ int i;
++ int theChunk;
++ int allDone = 1;
++ yaffs_dev_t *dev = in->my_dev;
+
+- const YUCHAR *bname = (const YUCHAR *) name;
+- if (bname) {
+- while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH/2))) {
++ if (tn) {
++ if (level > 0) {
++
++ for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
++ i--) {
++ if (tn->internal[i]) {
++ allDone =
++ yaffs_soft_del_worker(in,
++ tn->
++ internal[i],
++ level - 1,
++ (chunk_offset
++ <<
++ YAFFS_TNODES_INTERNAL_BITS)
++ + i);
++ if (allDone) {
++ yaffs_free_tnode(dev,
++ tn->
++ internal[i]);
++ tn->internal[i] = NULL;
++ } else {
++ /* Hoosterman... how could this happen? */
++ }
++ }
++ }
++ return (allDone) ? 1 : 0;
++ } else if (level == 0) {
++
++ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
++ theChunk = yaffs_get_group_base(dev, tn, i);
++ if (theChunk) {
++ /* Note this does not find the real chunk, only the chunk group.
++ * We make an assumption that a chunk group is not larger than
++ * a block.
++ */
++ yaffs_soft_del_chunk(dev, theChunk);
++ yaffs_load_tnode_0(dev, tn, i, 0);
++ }
++
++ }
++ return 1;
+
+-#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+- sum += yaffs_toupper(*bname) * i;
+-#else
+- sum += (*bname) * i;
+-#endif
+- i++;
+- bname++;
+ }
++
+ }
+- return sum;
++
++ return 1;
++
+ }
+
+-static void yaffs_SetObjectName(yaffs_Object *obj, const YCHAR *name)
++static void yaffs_soft_del_file(yaffs_obj_t *obj)
+ {
+-#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+- memset(obj->shortName, 0, sizeof(YCHAR) * (YAFFS_SHORT_NAME_LENGTH+1));
+- if (name && yaffs_strlen(name) <= YAFFS_SHORT_NAME_LENGTH)
+- yaffs_strcpy(obj->shortName, name);
+- else
+- obj->shortName[0] = _Y('\0');
+-#endif
+- obj->sum = yaffs_CalcNameSum(name);
++ if (obj->deleted &&
++ obj->variant_type == YAFFS_OBJECT_TYPE_FILE && !obj->soft_del) {
++ if (obj->n_data_chunks <= 0) {
++ /* Empty file with no duplicate object headers, just delete it immediately */
++ yaffs_free_tnode(obj->my_dev,
++ obj->variant.file_variant.top);
++ obj->variant.file_variant.top = NULL;
++ T(YAFFS_TRACE_TRACING,
++ (TSTR("yaffs: Deleting empty file %d" TENDSTR),
++ obj->obj_id));
++ yaffs_generic_obj_del(obj);
++ } else {
++ yaffs_soft_del_worker(obj,
++ obj->variant.file_variant.top,
++ obj->variant.file_variant.
++ top_level, 0);
++ obj->soft_del = 1;
++ }
++ }
+ }
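
Soft deletion, as the comment above yaffs_soft_del_worker() says, never touches NAND: for each deleted chunk it clears the tnode entry, bumps the owning block's soft_del_pages counter and the device's free-chunk count, and leaves the real reclaim to the garbage collector. A standalone sketch of just that bookkeeping, using invented ex_* types instead of the yaffs structures and omitting the oldest-dirty-sequence update that the real yaffs_soft_del_chunk() also performs:

#include <stdio.h>

struct ex_block { int soft_del_pages; };

struct ex_dev {
	struct ex_block	blocks[8];
	int		chunks_per_block;
	int		n_free_chunks;
};

/* Account one chunk as soft deleted: the block gains a reclaimable page
 * and the device-wide free count goes up immediately. */
static void soft_del_chunk(struct ex_dev *dev, int chunk)
{
	struct ex_block *b = &dev->blocks[chunk / dev->chunks_per_block];

	b->soft_del_pages++;
	dev->n_free_chunks++;
}

int main(void)
{
	struct ex_dev dev = { .chunks_per_block = 64 };

	soft_del_chunk(&dev, 130);		/* chunk 130 lives in block 2 */
	soft_del_chunk(&dev, 131);
	printf("block 2: %d soft-deleted pages, %d free chunks device-wide\n",
	       dev.blocks[2].soft_del_pages, dev.n_free_chunks);
	return 0;
}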
+
+-/*-------------------- TNODES -------------------
+-
+- * List of spare tnodes
+- * The list is hooked together using the first pointer
+- * in the tnode.
+- */
+-
+-/* yaffs_CreateTnodes creates a bunch more tnodes and
+- * adds them to the tnode free list.
+- * Don't use this function directly
++/* Pruning removes any part of the file structure tree that is beyond the
++ * bounds of the file (ie that does not point to chunks).
++ *
++ * A file should only get pruned when its size is reduced.
++ *
++ * Before pruning, the chunks must be pulled from the tree and the
++ * level 0 tnode entries must be zeroed out.
++ * Could also use this for file deletion, but that's probably better handled
++ * by a special case.
++ *
++ * This function is recursive. For levels > 0 the function is called again on
++ * any sub-tree. For level == 0 we just check if the sub-tree has data.
++ * If there is no data in a subtree then it is pruned.
+ */
+
+-static int yaffs_CreateTnodes(yaffs_Device *dev, int nTnodes)
++static yaffs_tnode_t *yaffs_prune_worker(yaffs_dev_t *dev, yaffs_tnode_t *tn,
++ __u32 level, int del0)
+ {
+ int i;
+- int tnodeSize;
+- yaffs_Tnode *newTnodes;
+- __u8 *mem;
+- yaffs_Tnode *curr;
+- yaffs_Tnode *next;
+- yaffs_TnodeList *tnl;
++ int hasData;
+
+- if (nTnodes < 1)
+- return YAFFS_OK;
++ if (tn) {
++ hasData = 0;
+
+- /* Calculate the tnode size in bytes for variable width tnode support.
+- * Must be a multiple of 32-bits */
+- tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++ if(level > 0){
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
++ if (tn->internal[i]) {
++ tn->internal[i] =
++ yaffs_prune_worker(dev, tn->internal[i],
++ level - 1,
++ (i == 0) ? del0 : 1);
++ }
+
+- if (tnodeSize < sizeof(yaffs_Tnode))
+- tnodeSize = sizeof(yaffs_Tnode);
++ if (tn->internal[i])
++ hasData++;
++ }
++ } else {
++ int tnode_size_u32 = dev->tnode_size/sizeof(__u32);
++ __u32 *map = (__u32 *)tn;
+
+- /* make these things */
++ for(i = 0; !hasData && i < tnode_size_u32; i++){
++ if(map[i])
++ hasData++;
++ }
++ }
+
+- newTnodes = YMALLOC(nTnodes * tnodeSize);
+- mem = (__u8 *)newTnodes;
++ if (hasData == 0 && del0) {
++ /* Free and return NULL */
+
+- if (!newTnodes) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs: Could not allocate Tnodes" TENDSTR)));
+- return YAFFS_FAIL;
+- }
++ yaffs_free_tnode(dev, tn);
++ tn = NULL;
++ }
+
+- /* Hook them into the free list */
+-#if 0
+- for (i = 0; i < nTnodes - 1; i++) {
+- newTnodes[i].internal[0] = &newTnodes[i + 1];
+-#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- newTnodes[i].internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
+-#endif
+ }
+
+- newTnodes[nTnodes - 1].internal[0] = dev->freeTnodes;
+-#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- newTnodes[nTnodes - 1].internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
+-#endif
+- dev->freeTnodes = newTnodes;
+-#else
+- /* New hookup for wide tnodes */
+- for (i = 0; i < nTnodes - 1; i++) {
+- curr = (yaffs_Tnode *) &mem[i * tnodeSize];
+- next = (yaffs_Tnode *) &mem[(i+1) * tnodeSize];
+- curr->internal[0] = next;
+- }
++ return tn;
+
+- curr = (yaffs_Tnode *) &mem[(nTnodes - 1) * tnodeSize];
+- curr->internal[0] = dev->freeTnodes;
+- dev->freeTnodes = (yaffs_Tnode *)mem;
++}
+
+-#endif
++static int yaffs_prune_tree(yaffs_dev_t *dev,
++ yaffs_file_s *file_struct)
++{
++ int i;
++ int hasData;
++ int done = 0;
++ yaffs_tnode_t *tn;
+
++ if (file_struct->top_level > 0) {
++ file_struct->top =
++ yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
++
++ /* Now we have a tree with all the non-zero branches NULL but the height
++ * is the same as it was.
++ * Let's see if we can trim internal tnodes to shorten the tree.
++ * We can do this if only the 0th element in the tnode is in use
++ * (ie all the non-zero are NULL)
++ */
+
+- dev->nFreeTnodes += nTnodes;
+- dev->nTnodesCreated += nTnodes;
++ while (file_struct->top_level && !done) {
++ tn = file_struct->top;
+
+- /* Now add this bunch of tnodes to a list for freeing up.
+- * NB If we can't add this to the management list it isn't fatal
+- * but it just means we can't free this bunch of tnodes later.
+- */
++ hasData = 0;
++ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
++ if (tn->internal[i])
++ hasData++;
++ }
+
+- tnl = YMALLOC(sizeof(yaffs_TnodeList));
+- if (!tnl) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs: Could not add tnodes to management list" TENDSTR)));
+- return YAFFS_FAIL;
+- } else {
+- tnl->tnodes = newTnodes;
+- tnl->next = dev->allocatedTnodeList;
+- dev->allocatedTnodeList = tnl;
++ if (!hasData) {
++ file_struct->top = tn->internal[0];
++ file_struct->top_level--;
++ yaffs_free_tnode(dev, tn);
++ } else {
++ done = 1;
++ }
++ }
+ }
+
+- T(YAFFS_TRACE_ALLOCATE, (TSTR("yaffs: Tnodes added" TENDSTR)));
+-
+ return YAFFS_OK;
+ }
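
The trimming loop at the end of yaffs_prune_tree() is the part that actually shortens the tree: once pruning has nulled the empty branches, any top tnode whose only live pointer is internal[0] can be replaced by that child and freed, reducing top_level by one, until level 0 is reached or some other slot holds data. A standalone sketch of that loop over an invented ex_tnode type:

#include <stdio.h>
#include <stdlib.h>

#define EX_INTERNAL 8	/* assumed fan-out; the real value is YAFFS_NTNODES_INTERNAL */

struct ex_tnode {
	struct ex_tnode *internal[EX_INTERNAL];
};

/* While the top node only uses slot 0, promote slot 0 to be the new top,
 * free the old top and decrement the recorded tree height. */
static struct ex_tnode *trim_top(struct ex_tnode *top, int *top_level)
{
	while (*top_level > 0) {
		struct ex_tnode *old = top;
		int i, has_data = 0;

		for (i = 1; i < EX_INTERNAL; i++)
			if (top->internal[i])
				has_data = 1;
		if (has_data)
			break;

		top = top->internal[0];
		free(old);
		(*top_level)--;
	}
	return top;
}

int main(void)
{
	struct ex_tnode *level0 = calloc(1, sizeof(*level0));
	struct ex_tnode *top = calloc(1, sizeof(*top));
	int top_level = 1;

	top->internal[0] = level0;		/* only slot 0 is in use */
	top = trim_top(top, &top_level);
	printf("top_level after trimming: %d\n", top_level);

	free(top);
	return 0;
}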
+
+-/* GetTnode gets us a clean tnode. Tries to make allocate more if we run out */
++/*-------------------- End of File Structure functions.-------------------*/
++
+
+-static yaffs_Tnode *yaffs_GetTnodeRaw(yaffs_Device *dev)
++/* AllocateEmptyObject gets us a clean Object. Tries to make allocate more if we run out */
++static yaffs_obj_t *yaffs_alloc_empty_obj(yaffs_dev_t *dev)
+ {
+- yaffs_Tnode *tn = NULL;
++ yaffs_obj_t *obj = yaffs_alloc_raw_obj(dev);
+
+- /* If there are none left make more */
+- if (!dev->freeTnodes)
+- yaffs_CreateTnodes(dev, YAFFS_ALLOCATION_NTNODES);
+-
+- if (dev->freeTnodes) {
+- tn = dev->freeTnodes;
+-#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- if (tn->internal[YAFFS_NTNODES_INTERNAL] != (void *)1) {
+- /* Hoosterman, this thing looks like it isn't in the list */
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("yaffs: Tnode list bug 1" TENDSTR)));
+- }
+-#endif
+- dev->freeTnodes = dev->freeTnodes->internal[0];
+- dev->nFreeTnodes--;
+- }
++ if (obj) {
++ dev->n_obj++;
+
+- dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
++ /* Now sweeten it up... */
+
+- return tn;
+-}
++ memset(obj, 0, sizeof(yaffs_obj_t));
++ obj->being_created = 1;
+
+-static yaffs_Tnode *yaffs_GetTnode(yaffs_Device *dev)
+-{
+- yaffs_Tnode *tn = yaffs_GetTnodeRaw(dev);
+- int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++ obj->my_dev = dev;
++ obj->hdr_chunk = 0;
++ obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
++ YINIT_LIST_HEAD(&(obj->hard_links));
++ YINIT_LIST_HEAD(&(obj->hash_link));
++ YINIT_LIST_HEAD(&obj->siblings);
+
+- if (tnodeSize < sizeof(yaffs_Tnode))
+- tnodeSize = sizeof(yaffs_Tnode);
+
+- if (tn)
+- memset(tn, 0, tnodeSize);
++ /* Now make the directory sane */
++ if (dev->root_dir) {
++ obj->parent = dev->root_dir;
++ ylist_add(&(obj->siblings), &dev->root_dir->variant.dir_variant.children);
++ }
+
+- return tn;
++ /* Add it to the lost and found directory.
++ * NB Can't put root or lostNFound in lostNFound so
++ * check if lostNFound exists first
++ */
++ if (dev->lost_n_found)
++ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
++
++ obj->being_created = 0;
++ }
++
++ dev->checkpoint_blocks_required = 0; /* force recalculation*/
++
++ return obj;
+ }
+
+-/* FreeTnode frees up a tnode and puts it back on the free list */
+-static void yaffs_FreeTnode(yaffs_Device *dev, yaffs_Tnode *tn)
++static yaffs_obj_t *yaffs_create_fake_dir(yaffs_dev_t *dev, int number,
++ __u32 mode)
+ {
+- if (tn) {
+-#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- if (tn->internal[YAFFS_NTNODES_INTERNAL] != 0) {
+- /* Hoosterman, this thing looks like it is already in the list */
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("yaffs: Tnode list bug 2" TENDSTR)));
+- }
+- tn->internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
+-#endif
+- tn->internal[0] = dev->freeTnodes;
+- dev->freeTnodes = tn;
+- dev->nFreeTnodes++;
++
++ yaffs_obj_t *obj =
++ yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
++ if (obj) {
++ obj->fake = 1; /* it is fake so it might have no NAND presence... */
++ obj->rename_allowed = 0; /* ... and we're not allowed to rename it... */
++ obj->unlink_allowed = 0; /* ... or unlink it */
++ obj->deleted = 0;
++ obj->unlinked = 0;
++ obj->yst_mode = mode;
++ obj->my_dev = dev;
++ obj->hdr_chunk = 0; /* Not a valid chunk. */
++ }
++
++ return obj;
++
++}
++
++static void yaffs_unhash_obj(yaffs_obj_t *obj)
++{
++ int bucket;
++ yaffs_dev_t *dev = obj->my_dev;
++
++ /* If it is still linked into the bucket list, free from the list */
++ if (!ylist_empty(&obj->hash_link)) {
++ ylist_del_init(&obj->hash_link);
++ bucket = yaffs_hash_fn(obj->obj_id);
++ dev->obj_bucket[bucket].count--;
+ }
+- dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
+ }
+
+-static void yaffs_DeinitialiseTnodes(yaffs_Device *dev)
++/* FreeObject frees up a Object and puts it back on the free list */
++static void yaffs_free_obj(yaffs_obj_t *obj)
+ {
+- /* Free the list of allocated tnodes */
+- yaffs_TnodeList *tmp;
++ yaffs_dev_t *dev = obj->my_dev;
++
++ T(YAFFS_TRACE_OS, (TSTR("FreeObject %p inode %p"TENDSTR), obj, obj->my_inode));
+
+- while (dev->allocatedTnodeList) {
+- tmp = dev->allocatedTnodeList->next;
++ if (!obj)
++ YBUG();
++ if (obj->parent)
++ YBUG();
++ if (!ylist_empty(&obj->siblings))
++ YBUG();
+
+- YFREE(dev->allocatedTnodeList->tnodes);
+- YFREE(dev->allocatedTnodeList);
+- dev->allocatedTnodeList = tmp;
+
++ if (obj->my_inode) {
++ /* We're still hooked up to a cached inode.
++ * Don't delete now, but mark for later deletion
++ */
++ obj->defered_free = 1;
++ return;
+ }
+
+- dev->freeTnodes = NULL;
+- dev->nFreeTnodes = 0;
+-}
++ yaffs_unhash_obj(obj);
+
+-static void yaffs_InitialiseTnodes(yaffs_Device *dev)
+-{
+- dev->allocatedTnodeList = NULL;
+- dev->freeTnodes = NULL;
+- dev->nFreeTnodes = 0;
+- dev->nTnodesCreated = 0;
++ yaffs_free_raw_obj(dev,obj);
++ dev->n_obj--;
++ dev->checkpoint_blocks_required = 0; /* force recalculation*/
+ }
+
+
+-void yaffs_PutLevel0Tnode(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos,
+- unsigned val)
++void yaffs_handle_defered_free(yaffs_obj_t *obj)
+ {
+- __u32 *map = (__u32 *)tn;
+- __u32 bitInMap;
+- __u32 bitInWord;
+- __u32 wordInMap;
+- __u32 mask;
+-
+- pos &= YAFFS_TNODES_LEVEL0_MASK;
+- val >>= dev->chunkGroupBits;
++ if (obj->defered_free)
++ yaffs_free_obj(obj);
++}
+
+- bitInMap = pos * dev->tnodeWidth;
+- wordInMap = bitInMap / 32;
+- bitInWord = bitInMap & (32 - 1);
++static void yaffs_init_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ int i;
+
+- mask = dev->tnodeMask << bitInWord;
++ dev->n_obj = 0;
++ dev->n_tnodes = 0;
+
+- map[wordInMap] &= ~mask;
+- map[wordInMap] |= (mask & (val << bitInWord));
++ yaffs_init_raw_tnodes_and_objs(dev);
+
+- if (dev->tnodeWidth > (32 - bitInWord)) {
+- bitInWord = (32 - bitInWord);
+- wordInMap++;;
+- mask = dev->tnodeMask >> (/*dev->tnodeWidth -*/ bitInWord);
+- map[wordInMap] &= ~mask;
+- map[wordInMap] |= (mask & (val >> bitInWord));
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ YINIT_LIST_HEAD(&dev->obj_bucket[i].list);
++ dev->obj_bucket[i].count = 0;
+ }
+ }
+
+-static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn,
+- unsigned pos)
++static int yaffs_find_nice_bucket(yaffs_dev_t *dev)
+ {
+- __u32 *map = (__u32 *)tn;
+- __u32 bitInMap;
+- __u32 bitInWord;
+- __u32 wordInMap;
+- __u32 val;
++ int i;
++ int l = 999;
++ int lowest = 999999;
+
+- pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+- bitInMap = pos * dev->tnodeWidth;
+- wordInMap = bitInMap / 32;
+- bitInWord = bitInMap & (32 - 1);
++ /* Search for the shortest list or one that
++ * isn't too long.
++ */
+
+- val = map[wordInMap] >> bitInWord;
++ for (i = 0; i < 10 && lowest > 4; i++) {
++ dev->bucket_finder++;
++ dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
++ if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
++ lowest = dev->obj_bucket[dev->bucket_finder].count;
++ l = dev->bucket_finder;
++ }
+
+- if (dev->tnodeWidth > (32 - bitInWord)) {
+- bitInWord = (32 - bitInWord);
+- wordInMap++;;
+- val |= (map[wordInMap] << bitInWord);
+ }
+
+- val &= dev->tnodeMask;
+- val <<= dev->chunkGroupBits;
+-
+- return val;
++ return l;
+ }
+
+-/* ------------------- End of individual tnode manipulation -----------------*/
+-
+-/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+- * The look up tree is represented by the top tnode and the number of topLevel
+- * in the tree. 0 means only the level 0 tnode is in the tree.
+- */
+-
+-/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+-static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev,
+- yaffs_FileStructure *fStruct,
+- __u32 chunkId)
++static int yaffs_new_obj_id(yaffs_dev_t *dev)
+ {
+- yaffs_Tnode *tn = fStruct->top;
+- __u32 i;
+- int requiredTallness;
+- int level = fStruct->topLevel;
+-
+- /* Check sane level and chunk Id */
+- if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+- return NULL;
++ int bucket = yaffs_find_nice_bucket(dev);
+
+- if (chunkId > YAFFS_MAX_CHUNK_ID)
+- return NULL;
++ /* Now find an object value that has not already been taken
++ * by scanning the list.
++ */
+
+- /* First check we're tall enough (ie enough topLevel) */
++ int found = 0;
++ struct ylist_head *i;
+
+- i = chunkId >> YAFFS_TNODES_LEVEL0_BITS;
+- requiredTallness = 0;
+- while (i) {
+- i >>= YAFFS_TNODES_INTERNAL_BITS;
+- requiredTallness++;
+- }
++ __u32 n = (__u32) bucket;
+
+- if (requiredTallness > fStruct->topLevel)
+- return NULL; /* Not tall enough, so we can't find it */
++ /* yaffs_check_obj_hash_sane(); */
+
+- /* Traverse down to level 0 */
+- while (level > 0 && tn) {
+- tn = tn->internal[(chunkId >>
+- (YAFFS_TNODES_LEVEL0_BITS +
+- (level - 1) *
+- YAFFS_TNODES_INTERNAL_BITS)) &
+- YAFFS_TNODES_INTERNAL_MASK];
+- level--;
++ while (!found) {
++ found = 1;
++ n += YAFFS_NOBJECT_BUCKETS;
++ if (1 || dev->obj_bucket[bucket].count > 0) {
++ ylist_for_each(i, &dev->obj_bucket[bucket].list) {
++ /* If there is already one in the list */
++ if (i && ylist_entry(i, yaffs_obj_t,
++ hash_link)->obj_id == n) {
++ found = 0;
++ }
++ }
++ }
+ }
+
+- return tn;
++ return n;
+ }
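
yaffs_new_obj_id() leans on the bucket hash: starting from the bucket chosen by yaffs_find_nice_bucket(), it steps in increments of YAFFS_NOBJECT_BUCKETS, so every candidate id hashes back to that same bucket and only that one bucket's list has to be checked for collisions. A standalone sketch of the idea, with the bucket list reduced to a plain array of already-used ids; all names below are invented for the example.

#include <stdio.h>

#define EX_BUCKETS 256	/* assumed; the real constant is YAFFS_NOBJECT_BUCKETS */

/* Stand-in for walking the bucket's hash list. */
static int id_taken(const unsigned *used, int n_used, unsigned id)
{
	int i;

	for (i = 0; i < n_used; i++)
		if (used[i] == id)
			return 1;
	return 0;
}

/* Candidate ids are bucket + EX_BUCKETS, bucket + 2*EX_BUCKETS, ...
 * so (id % EX_BUCKETS) always equals the chosen bucket. */
static unsigned new_obj_id(int bucket, const unsigned *used, int n_used)
{
	unsigned n = (unsigned)bucket;

	do {
		n += EX_BUCKETS;
	} while (id_taken(used, n_used, n));

	return n;
}

int main(void)
{
	unsigned used_in_bucket_7[] = { 263, 519 };	/* 7 + 256, 7 + 512 */

	printf("next id for bucket 7: %u\n",
	       new_obj_id(7, used_in_bucket_7, 2));	/* prints 775 */
	return 0;
}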
+
+-/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
+- * This happens in two steps:
+- * 1. If the tree isn't tall enough, then make it taller.
+- * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+- *
+- * Used when modifying the tree.
+- *
+- * If the tn argument is NULL, then a fresh tnode will be added otherwise the specified tn will
+- * be plugged into the ttree.
+- */
+-
+-static yaffs_Tnode *yaffs_AddOrFindLevel0Tnode(yaffs_Device *dev,
+- yaffs_FileStructure *fStruct,
+- __u32 chunkId,
+- yaffs_Tnode *passedTn)
++static void yaffs_hash_obj(yaffs_obj_t *in)
+ {
+- int requiredTallness;
+- int i;
+- int l;
+- yaffs_Tnode *tn;
+-
+- __u32 x;
++ int bucket = yaffs_hash_fn(in->obj_id);
++ yaffs_dev_t *dev = in->my_dev;
+
++ ylist_add(&in->hash_link, &dev->obj_bucket[bucket].list);
++ dev->obj_bucket[bucket].count++;
++}
+
+- /* Check sane level and page Id */
+- if (fStruct->topLevel < 0 || fStruct->topLevel > YAFFS_TNODES_MAX_LEVEL)
+- return NULL;
++yaffs_obj_t *yaffs_find_by_number(yaffs_dev_t *dev, __u32 number)
++{
++ int bucket = yaffs_hash_fn(number);
++ struct ylist_head *i;
++ yaffs_obj_t *in;
+
+- if (chunkId > YAFFS_MAX_CHUNK_ID)
+- return NULL;
++ ylist_for_each(i, &dev->obj_bucket[bucket].list) {
++ /* Look if it is in the list */
++ if (i) {
++ in = ylist_entry(i, yaffs_obj_t, hash_link);
++ if (in->obj_id == number) {
+
+- /* First check we're tall enough (ie enough topLevel) */
++ /* Don't tell the VFS about this one if it is defered free */
++ if (in->defered_free)
++ return NULL;
+
+- x = chunkId >> YAFFS_TNODES_LEVEL0_BITS;
+- requiredTallness = 0;
+- while (x) {
+- x >>= YAFFS_TNODES_INTERNAL_BITS;
+- requiredTallness++;
++ return in;
++ }
++ }
+ }
+
++ return NULL;
++}
+
+- if (requiredTallness > fStruct->topLevel) {
+- /* Not tall enough, gotta make the tree taller */
+- for (i = fStruct->topLevel; i < requiredTallness; i++) {
++yaffs_obj_t *yaffs_new_obj(yaffs_dev_t *dev, int number,
++ yaffs_obj_type type)
++{
++ yaffs_obj_t *theObject=NULL;
++ yaffs_tnode_t *tn = NULL;
+
+- tn = yaffs_GetTnode(dev);
++ if (number < 0)
++ number = yaffs_new_obj_id(dev);
+
+- if (tn) {
+- tn->internal[0] = fStruct->top;
+- fStruct->top = tn;
+- } else {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs: no more tnodes" TENDSTR)));
+- }
+- }
++ if (type == YAFFS_OBJECT_TYPE_FILE) {
++ tn = yaffs_get_tnode(dev);
++ if (!tn)
++ return NULL;
++ }
+
+- fStruct->topLevel = requiredTallness;
++ theObject = yaffs_alloc_empty_obj(dev);
++ if (!theObject){
++ if(tn)
++ yaffs_free_tnode(dev,tn);
++ return NULL;
+ }
+
+- /* Traverse down to level 0, adding anything we need */
+
+- l = fStruct->topLevel;
+- tn = fStruct->top;
++ if (theObject) {
++ theObject->fake = 0;
++ theObject->rename_allowed = 1;
++ theObject->unlink_allowed = 1;
++ theObject->obj_id = number;
++ yaffs_hash_obj(theObject);
++ theObject->variant_type = type;
++#ifdef CONFIG_YAFFS_WINCE
++ yfsd_win_file_time_now(theObject->win_atime);
++ theObject->win_ctime[0] = theObject->win_mtime[0] =
++ theObject->win_atime[0];
++ theObject->win_ctime[1] = theObject->win_mtime[1] =
++ theObject->win_atime[1];
++
++#else
+
+- if (l > 0) {
+- while (l > 0 && tn) {
+- x = (chunkId >>
+- (YAFFS_TNODES_LEVEL0_BITS +
+- (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+- YAFFS_TNODES_INTERNAL_MASK;
++ theObject->yst_atime = theObject->yst_mtime =
++ theObject->yst_ctime = Y_CURRENT_TIME;
++#endif
++ switch (type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ theObject->variant.file_variant.file_size = 0;
++ theObject->variant.file_variant.scanned_size = 0;
++ theObject->variant.file_variant.shrink_size = 0xFFFFFFFF; /* max __u32 */
++ theObject->variant.file_variant.top_level = 0;
++ theObject->variant.file_variant.top = tn;
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ YINIT_LIST_HEAD(&theObject->variant.dir_variant.
++ children);
++ YINIT_LIST_HEAD(&theObject->variant.dir_variant.
++ dirty);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* No action required */
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* todo this should not happen */
++ break;
++ }
++ }
+
++ return theObject;
++}
+
+- if ((l > 1) && !tn->internal[x]) {
+- /* Add missing non-level-zero tnode */
+- tn->internal[x] = yaffs_GetTnode(dev);
++yaffs_obj_t *yaffs_find_or_create_by_number(yaffs_dev_t *dev,
++ int number,
++ yaffs_obj_type type)
++{
++ yaffs_obj_t *theObject = NULL;
+
+- } else if (l == 1) {
+- /* Looking from level 1 at level 0 */
+- if (passedTn) {
+- /* If we already have one, then release it.*/
+- if (tn->internal[x])
+- yaffs_FreeTnode(dev, tn->internal[x]);
+- tn->internal[x] = passedTn;
++ if (number > 0)
++ theObject = yaffs_find_by_number(dev, number);
+
+- } else if (!tn->internal[x]) {
+- /* Don't have one, none passed in */
+- tn->internal[x] = yaffs_GetTnode(dev);
+- }
+- }
++ if (!theObject)
++ theObject = yaffs_new_obj(dev, number, type);
+
+- tn = tn->internal[x];
+- l--;
+- }
+- } else {
+- /* top is level 0 */
+- if (passedTn) {
+- memcpy(tn, passedTn, (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
+- yaffs_FreeTnode(dev, passedTn);
+- }
+- }
++ return theObject;
+
+- return tn;
+ }
+
+-static int yaffs_FindChunkInGroup(yaffs_Device *dev, int theChunk,
+- yaffs_ExtendedTags *tags, int objectId,
+- int chunkInInode)
++
++YCHAR *yaffs_clone_str(const YCHAR *str)
+ {
+- int j;
++ YCHAR *newStr = NULL;
++ int len;
+
+- for (j = 0; theChunk && j < dev->chunkGroupSize; j++) {
+- if (yaffs_CheckChunkBit(dev, theChunk / dev->nChunksPerBlock,
+- theChunk % dev->nChunksPerBlock)) {
+- yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL,
+- tags);
+- if (yaffs_TagsMatch(tags, objectId, chunkInInode)) {
+- /* found it; */
+- return theChunk;
+- }
+- }
+- theChunk++;
++ if (!str)
++ str = _Y("");
++
++ len = yaffs_strnlen(str,YAFFS_MAX_ALIAS_LENGTH);
++ newStr = YMALLOC((len + 1) * sizeof(YCHAR));
++ if (newStr){
++ yaffs_strncpy(newStr, str,len);
++ newStr[len] = 0;
+ }
+- return -1;
+-}
++ return newStr;
+
++}
+
+-/* DeleteWorker scans backwards through the tnode tree and deletes all the
+- * chunks and tnodes in the file
+- * Returns 1 if the tree was deleted.
+- * Returns 0 if it stopped early due to hitting the limit and the delete is incomplete.
++/*
++ * Mknod (create) a new object.
++ * equiv_obj only has meaning for a hard link;
++ * aliasString only has meaning for a symlink.
++ * rdev only has meaning for devices (a subset of special objects)
+ */
+
+-static int yaffs_DeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, __u32 level,
+- int chunkOffset, int *limit)
++static yaffs_obj_t *yaffs_create_obj(yaffs_obj_type type,
++ yaffs_obj_t *parent,
++ const YCHAR *name,
++ __u32 mode,
++ __u32 uid,
++ __u32 gid,
++ yaffs_obj_t *equiv_obj,
++ const YCHAR *aliasString, __u32 rdev)
+ {
+- int i;
+- int chunkInInode;
+- int theChunk;
+- yaffs_ExtendedTags tags;
+- int foundChunk;
+- yaffs_Device *dev = in->myDev;
+-
+- int allDone = 1;
+-
+- if (tn) {
+- if (level > 0) {
+- for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
+- i--) {
+- if (tn->internal[i]) {
+- if (limit && (*limit) < 0) {
+- allDone = 0;
+- } else {
+- allDone =
+- yaffs_DeleteWorker(in,
+- tn->
+- internal
+- [i],
+- level -
+- 1,
+- (chunkOffset
+- <<
+- YAFFS_TNODES_INTERNAL_BITS)
+- + i,
+- limit);
+- }
+- if (allDone) {
+- yaffs_FreeTnode(dev,
+- tn->
+- internal[i]);
+- tn->internal[i] = NULL;
+- }
+- }
+- }
+- return (allDone) ? 1 : 0;
+- } else if (level == 0) {
+- int hitLimit = 0;
+-
+- for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0 && !hitLimit;
+- i--) {
+- theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+- if (theChunk) {
+-
+- chunkInInode = (chunkOffset <<
+- YAFFS_TNODES_LEVEL0_BITS) + i;
+-
+- foundChunk =
+- yaffs_FindChunkInGroup(dev,
+- theChunk,
+- &tags,
+- in->objectId,
+- chunkInInode);
+-
+- if (foundChunk > 0) {
+- yaffs_DeleteChunk(dev,
+- foundChunk, 1,
+- __LINE__);
+- in->nDataChunks--;
+- if (limit) {
+- *limit = *limit - 1;
+- if (*limit <= 0)
+- hitLimit = 1;
+- }
+-
+- }
+-
+- yaffs_PutLevel0Tnode(dev, tn, i, 0);
+- }
+-
+- }
+- return (i < 0) ? 1 : 0;
+-
+- }
+-
+- }
+-
+- return 1;
+-
+-}
+-
+-static void yaffs_SoftDeleteChunk(yaffs_Device *dev, int chunk)
+-{
+- yaffs_BlockInfo *theBlock;
+-
+- T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk));
+-
+- theBlock = yaffs_GetBlockInfo(dev, chunk / dev->nChunksPerBlock);
+- if (theBlock) {
+- theBlock->softDeletions++;
+- dev->nFreeChunks++;
+- }
+-}
+-
+-/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file.
+- * All soft deleting does is increment the block's softdelete count and pulls the chunk out
+- * of the tnode.
+- * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
+- */
+-
+-static int yaffs_SoftDeleteWorker(yaffs_Object *in, yaffs_Tnode *tn,
+- __u32 level, int chunkOffset)
+-{
+- int i;
+- int theChunk;
+- int allDone = 1;
+- yaffs_Device *dev = in->myDev;
+-
+- if (tn) {
+- if (level > 0) {
+-
+- for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
+- i--) {
+- if (tn->internal[i]) {
+- allDone =
+- yaffs_SoftDeleteWorker(in,
+- tn->
+- internal[i],
+- level - 1,
+- (chunkOffset
+- <<
+- YAFFS_TNODES_INTERNAL_BITS)
+- + i);
+- if (allDone) {
+- yaffs_FreeTnode(dev,
+- tn->
+- internal[i]);
+- tn->internal[i] = NULL;
+- } else {
+- /* Hoosterman... how could this happen? */
+- }
+- }
+- }
+- return (allDone) ? 1 : 0;
+- } else if (level == 0) {
+-
+- for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+- theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+- if (theChunk) {
+- /* Note this does not find the real chunk, only the chunk group.
+- * We make an assumption that a chunk group is not larger than
+- * a block.
+- */
+- yaffs_SoftDeleteChunk(dev, theChunk);
+- yaffs_PutLevel0Tnode(dev, tn, i, 0);
+- }
+-
+- }
+- return 1;
+-
+- }
+-
+- }
+-
+- return 1;
+-
+-}
+-
+-static void yaffs_SoftDeleteFile(yaffs_Object *obj)
+-{
+- if (obj->deleted &&
+- obj->variantType == YAFFS_OBJECT_TYPE_FILE && !obj->softDeleted) {
+- if (obj->nDataChunks <= 0) {
+- /* Empty file with no duplicate object headers, just delete it immediately */
+- yaffs_FreeTnode(obj->myDev,
+- obj->variant.fileVariant.top);
+- obj->variant.fileVariant.top = NULL;
+- T(YAFFS_TRACE_TRACING,
+- (TSTR("yaffs: Deleting empty file %d" TENDSTR),
+- obj->objectId));
+- yaffs_DoGenericObjectDeletion(obj);
+- } else {
+- yaffs_SoftDeleteWorker(obj,
+- obj->variant.fileVariant.top,
+- obj->variant.fileVariant.
+- topLevel, 0);
+- obj->softDeleted = 1;
+- }
+- }
+-}
+-
+-/* Pruning removes any part of the file structure tree that is beyond the
+- * bounds of the file (ie that does not point to chunks).
+- *
+- * A file should only get pruned when its size is reduced.
+- *
+- * Before pruning, the chunks must be pulled from the tree and the
+- * level 0 tnode entries must be zeroed out.
+- * Could also use this for file deletion, but that's probably better handled
+- * by a special case.
+- */
+-
+-static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device *dev, yaffs_Tnode *tn,
+- __u32 level, int del0)
+-{
+- int i;
+- int hasData;
+-
+- if (tn) {
+- hasData = 0;
+-
+- for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+- if (tn->internal[i] && level > 0) {
+- tn->internal[i] =
+- yaffs_PruneWorker(dev, tn->internal[i],
+- level - 1,
+- (i == 0) ? del0 : 1);
+- }
+-
+- if (tn->internal[i])
+- hasData++;
+- }
+-
+- if (hasData == 0 && del0) {
+- /* Free and return NULL */
+-
+- yaffs_FreeTnode(dev, tn);
+- tn = NULL;
+- }
+-
+- }
+-
+- return tn;
+-
+-}
+-
+-static int yaffs_PruneFileStructure(yaffs_Device *dev,
+- yaffs_FileStructure *fStruct)
+-{
+- int i;
+- int hasData;
+- int done = 0;
+- yaffs_Tnode *tn;
+-
+- if (fStruct->topLevel > 0) {
+- fStruct->top =
+- yaffs_PruneWorker(dev, fStruct->top, fStruct->topLevel, 0);
+-
+- /* Now we have a tree with all the non-zero branches NULL but the height
+- * is the same as it was.
+- * Let's see if we can trim internal tnodes to shorten the tree.
+- * We can do this if only the 0th element in the tnode is in use
+- * (ie all the non-zero are NULL)
+- */
+-
+- while (fStruct->topLevel && !done) {
+- tn = fStruct->top;
+-
+- hasData = 0;
+- for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+- if (tn->internal[i])
+- hasData++;
+- }
+-
+- if (!hasData) {
+- fStruct->top = tn->internal[0];
+- fStruct->topLevel--;
+- yaffs_FreeTnode(dev, tn);
+- } else {
+- done = 1;
+- }
+- }
+- }
+-
+- return YAFFS_OK;
+-}
+-
+-/*-------------------- End of File Structure functions.-------------------*/
+-
+-/* yaffs_CreateFreeObjects creates a bunch more objects and
+- * adds them to the object free list.
+- */
+-static int yaffs_CreateFreeObjects(yaffs_Device *dev, int nObjects)
+-{
+- int i;
+- yaffs_Object *newObjects;
+- yaffs_ObjectList *list;
+-
+- if (nObjects < 1)
+- return YAFFS_OK;
+-
+- /* make these things */
+- newObjects = YMALLOC(nObjects * sizeof(yaffs_Object));
+- list = YMALLOC(sizeof(yaffs_ObjectList));
+-
+- if (!newObjects || !list) {
+- if (newObjects)
+- YFREE(newObjects);
+- if (list)
+- YFREE(list);
+- T(YAFFS_TRACE_ALLOCATE,
+- (TSTR("yaffs: Could not allocate more objects" TENDSTR)));
+- return YAFFS_FAIL;
+- }
+-
+- /* Hook them into the free list */
+- for (i = 0; i < nObjects - 1; i++) {
+- newObjects[i].siblings.next =
+- (struct ylist_head *)(&newObjects[i + 1]);
+- }
+-
+- newObjects[nObjects - 1].siblings.next = (void *)dev->freeObjects;
+- dev->freeObjects = newObjects;
+- dev->nFreeObjects += nObjects;
+- dev->nObjectsCreated += nObjects;
+-
+- /* Now add this bunch of Objects to a list for freeing up. */
+-
+- list->objects = newObjects;
+- list->next = dev->allocatedObjectList;
+- dev->allocatedObjectList = list;
+-
+- return YAFFS_OK;
+-}
+-
+-
+-/* AllocateEmptyObject gets us a clean Object. Tries to make allocate more if we run out */
+-static yaffs_Object *yaffs_AllocateEmptyObject(yaffs_Device *dev)
+-{
+- yaffs_Object *tn = NULL;
+-
+-#ifdef VALGRIND_TEST
+- tn = YMALLOC(sizeof(yaffs_Object));
+-#else
+- /* If there are none left make more */
+- if (!dev->freeObjects)
+- yaffs_CreateFreeObjects(dev, YAFFS_ALLOCATION_NOBJECTS);
+-
+- if (dev->freeObjects) {
+- tn = dev->freeObjects;
+- dev->freeObjects =
+- (yaffs_Object *) (dev->freeObjects->siblings.next);
+- dev->nFreeObjects--;
+- }
+-#endif
+- if (tn) {
+- /* Now sweeten it up... */
+-
+- memset(tn, 0, sizeof(yaffs_Object));
+- tn->beingCreated = 1;
+-
+- tn->myDev = dev;
+- tn->hdrChunk = 0;
+- tn->variantType = YAFFS_OBJECT_TYPE_UNKNOWN;
+- YINIT_LIST_HEAD(&(tn->hardLinks));
+- YINIT_LIST_HEAD(&(tn->hashLink));
+- YINIT_LIST_HEAD(&tn->siblings);
+-
+-
+- /* Now make the directory sane */
+- if (dev->rootDir) {
+- tn->parent = dev->rootDir;
+- ylist_add(&(tn->siblings), &dev->rootDir->variant.directoryVariant.children);
+- }
+-
+- /* Add it to the lost and found directory.
+- * NB Can't put root or lostNFound in lostNFound so
+- * check if lostNFound exists first
+- */
+- if (dev->lostNFoundDir)
+- yaffs_AddObjectToDirectory(dev->lostNFoundDir, tn);
+-
+- tn->beingCreated = 0;
+- }
+-
+- dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
+-
+- return tn;
+-}
+-
+-static yaffs_Object *yaffs_CreateFakeDirectory(yaffs_Device *dev, int number,
+- __u32 mode)
+-{
+-
+- yaffs_Object *obj =
+- yaffs_CreateNewObject(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+- if (obj) {
+- obj->fake = 1; /* it is fake so it might have no NAND presence... */
+- obj->renameAllowed = 0; /* ... and we're not allowed to rename it... */
+- obj->unlinkAllowed = 0; /* ... or unlink it */
+- obj->deleted = 0;
+- obj->unlinked = 0;
+- obj->yst_mode = mode;
+- obj->myDev = dev;
+- obj->hdrChunk = 0; /* Not a valid chunk. */
+- }
+-
+- return obj;
+-
+-}
+-
+-static void yaffs_UnhashObject(yaffs_Object *tn)
+-{
+- int bucket;
+- yaffs_Device *dev = tn->myDev;
+-
+- /* If it is still linked into the bucket list, free from the list */
+- if (!ylist_empty(&tn->hashLink)) {
+- ylist_del_init(&tn->hashLink);
+- bucket = yaffs_HashFunction(tn->objectId);
+- dev->objectBucket[bucket].count--;
+- }
+-}
+-
+-/* FreeObject frees up a Object and puts it back on the free list */
+-static void yaffs_FreeObject(yaffs_Object *tn)
+-{
+- yaffs_Device *dev = tn->myDev;
+-
+-#ifdef __KERNEL__
+- T(YAFFS_TRACE_OS, (TSTR("FreeObject %p inode %p"TENDSTR), tn, tn->myInode));
+-#endif
+-
+- if (tn->parent)
+- YBUG();
+- if (!ylist_empty(&tn->siblings))
+- YBUG();
+-
+-
+-#ifdef __KERNEL__
+- if (tn->myInode) {
+- /* We're still hooked up to a cached inode.
+- * Don't delete now, but mark for later deletion
+- */
+- tn->deferedFree = 1;
+- return;
+- }
+-#endif
+-
+- yaffs_UnhashObject(tn);
+-
+-#ifdef VALGRIND_TEST
+- YFREE(tn);
+-#else
+- /* Link into the free list. */
+- tn->siblings.next = (struct ylist_head *)(dev->freeObjects);
+- dev->freeObjects = tn;
+- dev->nFreeObjects++;
+-#endif
+- dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
+-}
+-
+-#ifdef __KERNEL__
+-
+-void yaffs_HandleDeferedFree(yaffs_Object *obj)
+-{
+- if (obj->deferedFree)
+- yaffs_FreeObject(obj);
+-}
+-
+-#endif
+-
+-static void yaffs_DeinitialiseObjects(yaffs_Device *dev)
+-{
+- /* Free the list of allocated Objects */
+-
+- yaffs_ObjectList *tmp;
+-
+- while (dev->allocatedObjectList) {
+- tmp = dev->allocatedObjectList->next;
+- YFREE(dev->allocatedObjectList->objects);
+- YFREE(dev->allocatedObjectList);
+-
+- dev->allocatedObjectList = tmp;
+- }
+-
+- dev->freeObjects = NULL;
+- dev->nFreeObjects = 0;
+-}
+-
+-static void yaffs_InitialiseObjects(yaffs_Device *dev)
+-{
+- int i;
+-
+- dev->allocatedObjectList = NULL;
+- dev->freeObjects = NULL;
+- dev->nFreeObjects = 0;
+-
+- for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+- YINIT_LIST_HEAD(&dev->objectBucket[i].list);
+- dev->objectBucket[i].count = 0;
+- }
+-}
+-
+-static int yaffs_FindNiceObjectBucket(yaffs_Device *dev)
+-{
+- static int x;
+- int i;
+- int l = 999;
+- int lowest = 999999;
+-
+- /* First let's see if we can find one that's empty. */
+-
+- for (i = 0; i < 10 && lowest > 0; i++) {
+- x++;
+- x %= YAFFS_NOBJECT_BUCKETS;
+- if (dev->objectBucket[x].count < lowest) {
+- lowest = dev->objectBucket[x].count;
+- l = x;
+- }
+-
+- }
+-
+- /* If we didn't find an empty list, then try
+- * looking a bit further for a short one
+- */
+-
+- for (i = 0; i < 10 && lowest > 3; i++) {
+- x++;
+- x %= YAFFS_NOBJECT_BUCKETS;
+- if (dev->objectBucket[x].count < lowest) {
+- lowest = dev->objectBucket[x].count;
+- l = x;
+- }
+-
+- }
+-
+- return l;
+-}
+-
+-static int yaffs_CreateNewObjectNumber(yaffs_Device *dev)
+-{
+- int bucket = yaffs_FindNiceObjectBucket(dev);
+-
+- /* Now find an object value that has not already been taken
+- * by scanning the list.
+- */
+-
+- int found = 0;
+- struct ylist_head *i;
+-
+- __u32 n = (__u32) bucket;
+-
+- /* yaffs_CheckObjectHashSanity(); */
+-
+- while (!found) {
+- found = 1;
+- n += YAFFS_NOBJECT_BUCKETS;
+- if (1 || dev->objectBucket[bucket].count > 0) {
+- ylist_for_each(i, &dev->objectBucket[bucket].list) {
+- /* If there is already one in the list */
+- if (i && ylist_entry(i, yaffs_Object,
+- hashLink)->objectId == n) {
+- found = 0;
+- }
+- }
+- }
+- }
+-
+- return n;
+-}
+-
+-static void yaffs_HashObject(yaffs_Object *in)
+-{
+- int bucket = yaffs_HashFunction(in->objectId);
+- yaffs_Device *dev = in->myDev;
+-
+- ylist_add(&in->hashLink, &dev->objectBucket[bucket].list);
+- dev->objectBucket[bucket].count++;
+-}
+-
+-yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number)
+-{
+- int bucket = yaffs_HashFunction(number);
+- struct ylist_head *i;
+- yaffs_Object *in;
+-
+- ylist_for_each(i, &dev->objectBucket[bucket].list) {
+- /* Look if it is in the list */
+- if (i) {
+- in = ylist_entry(i, yaffs_Object, hashLink);
+- if (in->objectId == number) {
+-#ifdef __KERNEL__
+- /* Don't tell the VFS about this one if it is defered free */
+- if (in->deferedFree)
+- return NULL;
+-#endif
+-
+- return in;
+- }
+- }
+- }
+-
+- return NULL;
+-}
+-
+-yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number,
+- yaffs_ObjectType type)
+-{
+- yaffs_Object *theObject;
+- yaffs_Tnode *tn = NULL;
+-
+- if (number < 0)
+- number = yaffs_CreateNewObjectNumber(dev);
+-
+- theObject = yaffs_AllocateEmptyObject(dev);
+- if (!theObject)
+- return NULL;
+-
+- if (type == YAFFS_OBJECT_TYPE_FILE) {
+- tn = yaffs_GetTnode(dev);
+- if (!tn) {
+- yaffs_FreeObject(theObject);
+- return NULL;
+- }
+- }
+-
+- if (theObject) {
+- theObject->fake = 0;
+- theObject->renameAllowed = 1;
+- theObject->unlinkAllowed = 1;
+- theObject->objectId = number;
+- yaffs_HashObject(theObject);
+- theObject->variantType = type;
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_WinFileTimeNow(theObject->win_atime);
+- theObject->win_ctime[0] = theObject->win_mtime[0] =
+- theObject->win_atime[0];
+- theObject->win_ctime[1] = theObject->win_mtime[1] =
+- theObject->win_atime[1];
+-
+-#else
+-
+- theObject->yst_atime = theObject->yst_mtime =
+- theObject->yst_ctime = Y_CURRENT_TIME;
+-#endif
+- switch (type) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- theObject->variant.fileVariant.fileSize = 0;
+- theObject->variant.fileVariant.scannedFileSize = 0;
+- theObject->variant.fileVariant.shrinkSize = 0xFFFFFFFF; /* max __u32 */
+- theObject->variant.fileVariant.topLevel = 0;
+- theObject->variant.fileVariant.top = tn;
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- YINIT_LIST_HEAD(&theObject->variant.directoryVariant.
+- children);
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- /* No action required */
+- break;
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- /* todo this should not happen */
+- break;
+- }
+- }
+-
+- return theObject;
+-}
+-
+-static yaffs_Object *yaffs_FindOrCreateObjectByNumber(yaffs_Device *dev,
+- int number,
+- yaffs_ObjectType type)
+-{
+- yaffs_Object *theObject = NULL;
+-
+- if (number > 0)
+- theObject = yaffs_FindObjectByNumber(dev, number);
+-
+- if (!theObject)
+- theObject = yaffs_CreateNewObject(dev, number, type);
+-
+- return theObject;
+-
+-}
+-
+-
+-static YCHAR *yaffs_CloneString(const YCHAR *str)
+-{
+- YCHAR *newStr = NULL;
+-
+- if (str && *str) {
+- newStr = YMALLOC((yaffs_strlen(str) + 1) * sizeof(YCHAR));
+- if (newStr)
+- yaffs_strcpy(newStr, str);
+- }
+-
+- return newStr;
+-
+-}
+-
+-/*
+- * Mknod (create) a new object.
+- * equivalentObject only has meaning for a hard link;
+- * aliasString only has meaning for a symlink.
+- * rdev only has meaning for devices (a subset of special objects)
+- */
+-
+-static yaffs_Object *yaffs_MknodObject(yaffs_ObjectType type,
+- yaffs_Object *parent,
+- const YCHAR *name,
+- __u32 mode,
+- __u32 uid,
+- __u32 gid,
+- yaffs_Object *equivalentObject,
+- const YCHAR *aliasString, __u32 rdev)
+-{
+- yaffs_Object *in;
+- YCHAR *str = NULL;
+-
+- yaffs_Device *dev = parent->myDev;
+-
+- /* Check if the entry exists. If it does then fail the call since we don't want a dup.*/
+- if (yaffs_FindObjectByName(parent, name))
+- return NULL;
+-
+- in = yaffs_CreateNewObject(dev, -1, type);
+-
+- if (!in)
+- return YAFFS_FAIL;
+-
+- if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+- str = yaffs_CloneString(aliasString);
+- if (!str) {
+- yaffs_FreeObject(in);
+- return NULL;
+- }
+- }
+-
+-
+-
+- if (in) {
+- in->hdrChunk = 0;
+- in->valid = 1;
+- in->variantType = type;
+-
+- in->yst_mode = mode;
+-
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_WinFileTimeNow(in->win_atime);
+- in->win_ctime[0] = in->win_mtime[0] = in->win_atime[0];
+- in->win_ctime[1] = in->win_mtime[1] = in->win_atime[1];
+-
+-#else
+- in->yst_atime = in->yst_mtime = in->yst_ctime = Y_CURRENT_TIME;
+-
+- in->yst_rdev = rdev;
+- in->yst_uid = uid;
+- in->yst_gid = gid;
+-#endif
+- in->nDataChunks = 0;
+-
+- yaffs_SetObjectName(in, name);
+- in->dirty = 1;
+-
+- yaffs_AddObjectToDirectory(parent, in);
+-
+- in->myDev = parent->myDev;
+-
+- switch (type) {
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- in->variant.symLinkVariant.alias = str;
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- in->variant.hardLinkVariant.equivalentObject =
+- equivalentObject;
+- in->variant.hardLinkVariant.equivalentObjectId =
+- equivalentObject->objectId;
+- ylist_add(&in->hardLinks, &equivalentObject->hardLinks);
+- break;
+- case YAFFS_OBJECT_TYPE_FILE:
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- /* do nothing */
+- break;
+- }
+-
+- if (yaffs_UpdateObjectHeader(in, name, 0, 0, 0) < 0) {
+- /* Could not create the object header, fail the creation */
+- yaffs_DeleteObject(in);
+- in = NULL;
+- }
+-
+- }
+-
+- return in;
+-}
+-
+-yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name,
+- __u32 mode, __u32 uid, __u32 gid)
+-{
+- return yaffs_MknodObject(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+- uid, gid, NULL, NULL, 0);
+-}
+-
+-yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name,
+- __u32 mode, __u32 uid, __u32 gid)
+-{
+- return yaffs_MknodObject(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+- mode, uid, gid, NULL, NULL, 0);
+-}
+-
+-yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name,
+- __u32 mode, __u32 uid, __u32 gid, __u32 rdev)
+-{
+- return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+- uid, gid, NULL, NULL, rdev);
+-}
+-
+-yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name,
+- __u32 mode, __u32 uid, __u32 gid,
+- const YCHAR *alias)
+-{
+- return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+- uid, gid, NULL, alias, 0);
+-}
+-
+-/* yaffs_Link returns the object id of the equivalent object.*/
+-yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name,
+- yaffs_Object *equivalentObject)
+-{
+- /* Get the real object in case we were fed a hard link as an equivalent object */
+- equivalentObject = yaffs_GetEquivalentObject(equivalentObject);
+-
+- if (yaffs_MknodObject
+- (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
+- equivalentObject, NULL, 0)) {
+- return equivalentObject;
+- } else {
+- return NULL;
+- }
+-
+-}
+-
+-static int yaffs_ChangeObjectName(yaffs_Object *obj, yaffs_Object *newDir,
+- const YCHAR *newName, int force, int shadows)
+-{
+- int unlinkOp;
+- int deleteOp;
+-
+- yaffs_Object *existingTarget;
+-
+- if (newDir == NULL)
+- newDir = obj->parent; /* use the old directory */
+-
+- if (newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR
+- ("tragedy: yaffs_ChangeObjectName: newDir is not a directory"
+- TENDSTR)));
+- YBUG();
+- }
+-
+- /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
+- if (obj->myDev->isYaffs2)
+- unlinkOp = (newDir == obj->myDev->unlinkedDir);
+- else
+- unlinkOp = (newDir == obj->myDev->unlinkedDir
+- && obj->variantType == YAFFS_OBJECT_TYPE_FILE);
+-
+- deleteOp = (newDir == obj->myDev->deletedDir);
+-
+- existingTarget = yaffs_FindObjectByName(newDir, newName);
+-
+- /* If the object is a file going into the unlinked directory,
+- * then it is OK to just stuff it in since duplicate names are allowed.
+- * else only proceed if the new name does not exist and if we're putting
+- * it into a directory.
+- */
+- if ((unlinkOp ||
+- deleteOp ||
+- force ||
+- (shadows > 0) ||
+- !existingTarget) &&
+- newDir->variantType == YAFFS_OBJECT_TYPE_DIRECTORY) {
+- yaffs_SetObjectName(obj, newName);
+- obj->dirty = 1;
+-
+- yaffs_AddObjectToDirectory(newDir, obj);
+-
+- if (unlinkOp)
+- obj->unlinked = 1;
+-
+- /* If it is a deletion then we mark it as a shrink for gc purposes. */
+- if (yaffs_UpdateObjectHeader(obj, newName, 0, deleteOp, shadows) >= 0)
+- return YAFFS_OK;
+- }
+-
+- return YAFFS_FAIL;
+-}
+-
+-int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName,
+- yaffs_Object *newDir, const YCHAR *newName)
+-{
+- yaffs_Object *obj = NULL;
+- yaffs_Object *existingTarget = NULL;
+- int force = 0;
+-
+-
+- if (!oldDir || oldDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY)
+- YBUG();
+- if (!newDir || newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY)
+- YBUG();
+-
+-#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+-	/* Special case for case insensitive systems (eg. WinCE).
+- * While look-up is case insensitive, the name isn't.
+- * Therefore we might want to change x.txt to X.txt
+- */
+- if (oldDir == newDir && yaffs_strcmp(oldName, newName) == 0)
+- force = 1;
+-#endif
+-
+- else if (yaffs_strlen(newName) > YAFFS_MAX_NAME_LENGTH)
+- /* ENAMETOOLONG */
+- return YAFFS_FAIL;
+-
+- obj = yaffs_FindObjectByName(oldDir, oldName);
+-
+- if (obj && obj->renameAllowed) {
+-
+- /* Now do the handling for an existing target, if there is one */
+-
+- existingTarget = yaffs_FindObjectByName(newDir, newName);
+- if (existingTarget &&
+- existingTarget->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
+- !ylist_empty(&existingTarget->variant.directoryVariant.children)) {
+- /* There is a target that is a non-empty directory, so we fail */
+- return YAFFS_FAIL; /* EEXIST or ENOTEMPTY */
+- } else if (existingTarget && existingTarget != obj) {
+- /* Nuke the target first, using shadowing,
+- * but only if it isn't the same object
+- */
+- yaffs_ChangeObjectName(obj, newDir, newName, force,
+- existingTarget->objectId);
+- yaffs_UnlinkObject(existingTarget);
+- }
+-
+- return yaffs_ChangeObjectName(obj, newDir, newName, 1, 0);
+- }
+- return YAFFS_FAIL;
+-}
+-
+-/*------------------------- Block Management and Page Allocation ----------------*/
+-
+-static int yaffs_InitialiseBlocks(yaffs_Device *dev)
+-{
+- int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
+-
+- dev->blockInfo = NULL;
+- dev->chunkBits = NULL;
+-
+- dev->allocationBlock = -1; /* force it to get a new one */
+-
+-	/* If the first allocation strategy fails, try the alternate one */
+- dev->blockInfo = YMALLOC(nBlocks * sizeof(yaffs_BlockInfo));
+- if (!dev->blockInfo) {
+- dev->blockInfo = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockInfo));
+- dev->blockInfoAlt = 1;
+- } else
+- dev->blockInfoAlt = 0;
+-
+- if (dev->blockInfo) {
+- /* Set up dynamic blockinfo stuff. */
+- dev->chunkBitmapStride = (dev->nChunksPerBlock + 7) / 8; /* round up bytes */
+- dev->chunkBits = YMALLOC(dev->chunkBitmapStride * nBlocks);
+- if (!dev->chunkBits) {
+- dev->chunkBits = YMALLOC_ALT(dev->chunkBitmapStride * nBlocks);
+- dev->chunkBitsAlt = 1;
+- } else
+- dev->chunkBitsAlt = 0;
+- }
+-
+- if (dev->blockInfo && dev->chunkBits) {
+- memset(dev->blockInfo, 0, nBlocks * sizeof(yaffs_BlockInfo));
+- memset(dev->chunkBits, 0, dev->chunkBitmapStride * nBlocks);
+- return YAFFS_OK;
+- }
+-
+- return YAFFS_FAIL;
+-}
+-
+-static void yaffs_DeinitialiseBlocks(yaffs_Device *dev)
+-{
+- if (dev->blockInfoAlt && dev->blockInfo)
+- YFREE_ALT(dev->blockInfo);
+- else if (dev->blockInfo)
+- YFREE(dev->blockInfo);
+-
+- dev->blockInfoAlt = 0;
+-
+- dev->blockInfo = NULL;
+-
+- if (dev->chunkBitsAlt && dev->chunkBits)
+- YFREE_ALT(dev->chunkBits);
+- else if (dev->chunkBits)
+- YFREE(dev->chunkBits);
+- dev->chunkBitsAlt = 0;
+- dev->chunkBits = NULL;
+-}
+-
+-static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device *dev,
+- yaffs_BlockInfo *bi)
+-{
+- int i;
+- __u32 seq;
+- yaffs_BlockInfo *b;
+-
+- if (!dev->isYaffs2)
+- return 1; /* disqualification only applies to yaffs2. */
+-
+- if (!bi->hasShrinkHeader)
+- return 1; /* can gc */
+-
+- /* Find the oldest dirty sequence number if we don't know it and save it
+- * so we don't have to keep recomputing it.
+- */
+- if (!dev->oldestDirtySequence) {
+- seq = dev->sequenceNumber;
+-
+- for (i = dev->internalStartBlock; i <= dev->internalEndBlock;
+- i++) {
+- b = yaffs_GetBlockInfo(dev, i);
+- if (b->blockState == YAFFS_BLOCK_STATE_FULL &&
+- (b->pagesInUse - b->softDeletions) <
+- dev->nChunksPerBlock && b->sequenceNumber < seq) {
+- seq = b->sequenceNumber;
+- }
+- }
+- dev->oldestDirtySequence = seq;
+- }
+-
+- /* Can't do gc of this block if there are any blocks older than this one that have
+- * discarded pages.
+- */
+- return (bi->sequenceNumber <= dev->oldestDirtySequence);
+-}
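
The disqualification rule above only lets a block that carries a shrink header be collected once no older block still has discarded pages; the oldest dirty sequence number is computed lazily and cached on the device. A minimal sketch of that rule, assuming a simplified per-block summary (blk_sum) instead of the real yaffs_BlockInfo layout:

/* Hypothetical per-block summary used to decide whether a block that
 * holds a shrink header may be garbage collected yet. */
struct blk_sum {
	int full;		/* block is fully written */
	int live_pages;		/* pagesInUse - softDeletions */
	int pages_per_block;
	unsigned seq;		/* sequence number given at allocation time */
};

/* Oldest sequence number among full blocks that still have discarded
 * pages; mirrors the lazy computation in the function above. */
unsigned oldest_dirty_seq(const struct blk_sum *tab, int n, unsigned current_seq)
{
	unsigned oldest = current_seq;
	int i;

	for (i = 0; i < n; i++) {
		if (tab[i].full &&
		    tab[i].live_pages < tab[i].pages_per_block &&
		    tab[i].seq < oldest)
			oldest = tab[i].seq;
	}
	return oldest;
}

/* A shrink-header block may be collected only if nothing older is dirty. */
int shrink_block_can_gc(const struct blk_sum *b, unsigned oldest_dirty)
{
	return b->seq <= oldest_dirty;
}
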
+-
+-/* FindDirtiestBlock is used to select the dirtiest block (or close enough)
+- * for garbage collection.
+- */
+-
+-static int yaffs_FindBlockForGarbageCollection(yaffs_Device *dev,
+- int aggressive)
+-{
+- int b = dev->currentDirtyChecker;
+-
+- int i;
+- int iterations;
+- int dirtiest = -1;
+- int pagesInUse = 0;
+- int prioritised = 0;
+- yaffs_BlockInfo *bi;
+- int pendingPrioritisedExist = 0;
+-
+- /* First let's see if we need to grab a prioritised block */
+- if (dev->hasPendingPrioritisedGCs) {
+- for (i = dev->internalStartBlock; i < dev->internalEndBlock && !prioritised; i++) {
+-
+- bi = yaffs_GetBlockInfo(dev, i);
+- /* yaffs_VerifyBlock(dev,bi,i); */
+-
+- if (bi->gcPrioritise) {
+- pendingPrioritisedExist = 1;
+- if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
+- yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
+- pagesInUse = (bi->pagesInUse - bi->softDeletions);
+- dirtiest = i;
+- prioritised = 1;
+-					aggressive = 1; /* Fool the non-aggressive skip logic below */
+- }
+- }
+- }
+-
+- if (!pendingPrioritisedExist) /* None found, so we can clear this */
+- dev->hasPendingPrioritisedGCs = 0;
+- }
+-
+- /* If we're doing aggressive GC then we are happy to take a less-dirty block, and
+- * search harder.
+-	 * else (we're doing a leisurely gc), then we only bother to do this if the
+- * block has only a few pages in use.
+- */
+-
+- dev->nonAggressiveSkip--;
+-
+- if (!aggressive && (dev->nonAggressiveSkip > 0))
+- return -1;
+-
+- if (!prioritised)
+- pagesInUse =
+- (aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
+-
+- if (aggressive)
+- iterations =
+- dev->internalEndBlock - dev->internalStartBlock + 1;
+- else {
+- iterations =
+- dev->internalEndBlock - dev->internalStartBlock + 1;
+- iterations = iterations / 16;
+- if (iterations > 200)
+- iterations = 200;
+- }
+-
+- for (i = 0; i <= iterations && pagesInUse > 0 && !prioritised; i++) {
+- b++;
+- if (b < dev->internalStartBlock || b > dev->internalEndBlock)
+- b = dev->internalStartBlock;
+-
+- if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> Block %d is not valid" TENDSTR), b));
+- YBUG();
+- }
+-
+- bi = yaffs_GetBlockInfo(dev, b);
+-
+- if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
+- (bi->pagesInUse - bi->softDeletions) < pagesInUse &&
+- yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
+- dirtiest = b;
+- pagesInUse = (bi->pagesInUse - bi->softDeletions);
+- }
+- }
+-
+- dev->currentDirtyChecker = b;
+-
+- if (dirtiest > 0) {
+- T(YAFFS_TRACE_GC,
+- (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR), dirtiest,
+- dev->nChunksPerBlock - pagesInUse, prioritised));
+- }
+-
+- dev->oldestDirtySequence = 0;
+-
+- if (dirtiest > 0)
+- dev->nonAggressiveSkip = 4;
+-
+- return dirtiest;
+-}
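
The selection loop above walks the block table round-robin from currentDirtyChecker, scanning the whole array in aggressive mode but only a bounded window (one sixteenth of the array, capped at 200 blocks) in passive mode, and keeps the full block with the fewest live pages. A standalone sketch of that policy, using hypothetical simplified structures (struct blk, pick_gc_block) rather than the real yaffs types:

struct blk {
	int full;		/* completely written, so a gc candidate */
	int pages_in_use;	/* live pages after soft deletions */
};

/* Round-robin "dirtiest block" scan: resume after *cursor, look at no more
 * than `window` blocks, and return the full block with the fewest live
 * pages that beats `threshold`.  Returns -1 if nothing qualifies. */
int pick_gc_block(const struct blk *tab, int n_blocks,
		  int *cursor, int window, int threshold)
{
	int best = -1;
	int best_in_use = threshold;
	int b = *cursor;
	int i;

	for (i = 0; i < window && best_in_use > 0; i++) {
		b = (b + 1) % n_blocks;
		if (tab[b].full && tab[b].pages_in_use < best_in_use) {
			best = b;
			best_in_use = tab[b].pages_in_use;
		}
	}
	*cursor = b;		/* the next scan carries on from here */
	return best;
}
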
+-
+-static void yaffs_BlockBecameDirty(yaffs_Device *dev, int blockNo)
+-{
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockNo);
+-
+- int erasedOk = 0;
+-
+- /* If the block is still healthy erase it and mark as clean.
+- * If the block has had a data failure, then retire it.
+- */
+-
+- T(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+- (TSTR("yaffs_BlockBecameDirty block %d state %d %s"TENDSTR),
+- blockNo, bi->blockState, (bi->needsRetiring) ? "needs retiring" : ""));
+-
+- bi->blockState = YAFFS_BLOCK_STATE_DIRTY;
+-
+- if (!bi->needsRetiring) {
+- yaffs_InvalidateCheckpoint(dev);
+- erasedOk = yaffs_EraseBlockInNAND(dev, blockNo);
+- if (!erasedOk) {
+- dev->nErasureFailures++;
+- T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("**>> Erasure failed %d" TENDSTR), blockNo));
+- }
+- }
+-
+- if (erasedOk &&
+- ((yaffs_traceMask & YAFFS_TRACE_ERASE) || !yaffs_SkipVerification(dev))) {
+- int i;
+- for (i = 0; i < dev->nChunksPerBlock; i++) {
+- if (!yaffs_CheckChunkErased
+- (dev, blockNo * dev->nChunksPerBlock + i)) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- (">>Block %d erasure supposedly OK, but chunk %d not erased"
+- TENDSTR), blockNo, i));
+- }
+- }
+- }
+-
+- if (erasedOk) {
+- /* Clean it up... */
+- bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
+- dev->nErasedBlocks++;
+- bi->pagesInUse = 0;
+- bi->softDeletions = 0;
+- bi->hasShrinkHeader = 0;
+- bi->skipErasedCheck = 1; /* This is clean, so no need to check */
+- bi->gcPrioritise = 0;
+- yaffs_ClearChunkBits(dev, blockNo);
+-
+- T(YAFFS_TRACE_ERASE,
+- (TSTR("Erased block %d" TENDSTR), blockNo));
+- } else {
+- dev->nFreeChunks -= dev->nChunksPerBlock; /* We lost a block of free space */
+-
+- yaffs_RetireBlock(dev, blockNo);
+- T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("**>> Block %d retired" TENDSTR), blockNo));
+- }
+-}
+-
+-static int yaffs_FindBlockForAllocation(yaffs_Device *dev)
+-{
+- int i;
+-
+- yaffs_BlockInfo *bi;
+-
+- if (dev->nErasedBlocks < 1) {
+- /* Hoosterman we've got a problem.
+- * Can't get space to gc
+- */
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs tragedy: no more erased blocks" TENDSTR)));
+-
+- return -1;
+- }
+-
+- /* Find an empty block. */
+-
+- for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
+- dev->allocationBlockFinder++;
+- if (dev->allocationBlockFinder < dev->internalStartBlock
+- || dev->allocationBlockFinder > dev->internalEndBlock) {
+- dev->allocationBlockFinder = dev->internalStartBlock;
+- }
+-
+- bi = yaffs_GetBlockInfo(dev, dev->allocationBlockFinder);
+-
+- if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) {
+- bi->blockState = YAFFS_BLOCK_STATE_ALLOCATING;
+- dev->sequenceNumber++;
+- bi->sequenceNumber = dev->sequenceNumber;
+- dev->nErasedBlocks--;
+- T(YAFFS_TRACE_ALLOCATE,
+- (TSTR("Allocated block %d, seq %d, %d left" TENDSTR),
+- dev->allocationBlockFinder, dev->sequenceNumber,
+- dev->nErasedBlocks));
+- return dev->allocationBlockFinder;
+- }
+- }
+-
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR
+- ("yaffs tragedy: no more erased blocks, but there should have been %d"
+- TENDSTR), dev->nErasedBlocks));
+-
+- return -1;
+-}
+-
+-
+-
+-static int yaffs_CalcCheckpointBlocksRequired(yaffs_Device *dev)
+-{
+- if (!dev->nCheckpointBlocksRequired &&
+- dev->isYaffs2) {
+- /* Not a valid value so recalculate */
+- int nBytes = 0;
+- int nBlocks;
+- int devBlocks = (dev->endBlock - dev->startBlock + 1);
+- int tnodeSize;
+-
+- tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+-
+- if (tnodeSize < sizeof(yaffs_Tnode))
+- tnodeSize = sizeof(yaffs_Tnode);
+-
+- nBytes += sizeof(yaffs_CheckpointValidity);
+- nBytes += sizeof(yaffs_CheckpointDevice);
+- nBytes += devBlocks * sizeof(yaffs_BlockInfo);
+- nBytes += devBlocks * dev->chunkBitmapStride;
+- nBytes += (sizeof(yaffs_CheckpointObject) + sizeof(__u32)) * (dev->nObjectsCreated - dev->nFreeObjects);
+- nBytes += (tnodeSize + sizeof(__u32)) * (dev->nTnodesCreated - dev->nFreeTnodes);
+- nBytes += sizeof(yaffs_CheckpointValidity);
+- nBytes += sizeof(__u32); /* checksum*/
+-
+- /* Round up and add 2 blocks to allow for some bad blocks, so add 3 */
+-
+- nBlocks = (nBytes/(dev->nDataBytesPerChunk * dev->nChunksPerBlock)) + 3;
+-
+- dev->nCheckpointBlocksRequired = nBlocks;
+- }
+-
+- return dev->nCheckpointBlocksRequired;
+-}
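
The estimate above sums the fixed-size records a checkpoint stream contains (validity markers, the device record, per-block info and chunk bitmaps, one record plus a 32-bit tag per live object and per live tnode, and a checksum), divides by the payload of one block, and adds three blocks of slack. A worked sketch of the same arithmetic; every number in it is an assumed example geometry, not a value taken from this patch:

#include <stdio.h>

/* Illustrative checkpoint-size estimate: sum the record bytes, convert to
 * blocks (integer division, as the original does), then add 3 blocks of
 * slack (round-up plus 2 spare blocks of bad-block headroom). */
int main(void)
{
	/* Assumed example geometry -- not taken from the patch. */
	const int data_bytes_per_chunk = 2048;
	const int chunks_per_block = 64;
	const int dev_blocks = 1024;
	const int block_info_size = 8;     /* stand-in for sizeof(yaffs_BlockInfo) */
	const int chunk_bitmap_stride = (chunks_per_block + 7) / 8;
	const int live_objects = 500, obj_rec = 64;
	const int live_tnodes = 2000, tnode_rec = 32;

	long bytes = 0;
	bytes += 2 * 16;                                  /* two validity markers */
	bytes += 128;                                     /* device record */
	bytes += (long)dev_blocks * block_info_size;      /* block states */
	bytes += (long)dev_blocks * chunk_bitmap_stride;  /* chunk bitmaps */
	bytes += (long)live_objects * (obj_rec + 4);      /* objects + id tag */
	bytes += (long)live_tnodes * (tnode_rec + 4);     /* tnodes + offset tag */
	bytes += 4;                                       /* checksum */

	long blocks = bytes / ((long)data_bytes_per_chunk * chunks_per_block) + 3;
	printf("~%ld bytes -> reserve %ld blocks for the checkpoint\n", bytes, blocks);
	return 0;
}
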
+-
+-/*
+- * Check if there's space to allocate...
+- * Thinks.... do we need to make this the same as yaffs_GetFreeChunks()?
+- */
+-static int yaffs_CheckSpaceForAllocation(yaffs_Device *dev)
+-{
+- int reservedChunks;
+- int reservedBlocks = dev->nReservedBlocks;
+- int checkpointBlocks;
+-
+- if (dev->isYaffs2) {
+- checkpointBlocks = yaffs_CalcCheckpointBlocksRequired(dev) -
+- dev->blocksInCheckpoint;
+- if (checkpointBlocks < 0)
+- checkpointBlocks = 0;
+- } else {
+- checkpointBlocks = 0;
+- }
+-
+- reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->nChunksPerBlock);
+-
+- return (dev->nFreeChunks > reservedChunks);
+-}
+-
+-static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve,
+- yaffs_BlockInfo **blockUsedPtr)
+-{
+- int retVal;
+- yaffs_BlockInfo *bi;
+-
+- if (dev->allocationBlock < 0) {
+- /* Get next block to allocate off */
+- dev->allocationBlock = yaffs_FindBlockForAllocation(dev);
+- dev->allocationPage = 0;
+- }
+-
+- if (!useReserve && !yaffs_CheckSpaceForAllocation(dev)) {
+- /* Not enough space to allocate unless we're allowed to use the reserve. */
+- return -1;
+- }
+-
+- if (dev->nErasedBlocks < dev->nReservedBlocks
+- && dev->allocationPage == 0) {
+- T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR)));
+- }
+-
+- /* Next page please.... */
+- if (dev->allocationBlock >= 0) {
+- bi = yaffs_GetBlockInfo(dev, dev->allocationBlock);
+-
+- retVal = (dev->allocationBlock * dev->nChunksPerBlock) +
+- dev->allocationPage;
+- bi->pagesInUse++;
+- yaffs_SetChunkBit(dev, dev->allocationBlock,
+- dev->allocationPage);
+-
+- dev->allocationPage++;
+-
+- dev->nFreeChunks--;
+-
+- /* If the block is full set the state to full */
+- if (dev->allocationPage >= dev->nChunksPerBlock) {
+- bi->blockState = YAFFS_BLOCK_STATE_FULL;
+- dev->allocationBlock = -1;
+- }
+-
+- if (blockUsedPtr)
+- *blockUsedPtr = bi;
+-
+- return retVal;
+- }
+-
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR)));
+-
+- return -1;
+-}
+-
+-static int yaffs_GetErasedChunks(yaffs_Device *dev)
+-{
+- int n;
+-
+- n = dev->nErasedBlocks * dev->nChunksPerBlock;
+-
+- if (dev->allocationBlock > 0)
+- n += (dev->nChunksPerBlock - dev->allocationPage);
+-
+- return n;
+-
+-}
+-
+-static int yaffs_GarbageCollectBlock(yaffs_Device *dev, int block,
+- int wholeBlock)
+-{
+- int oldChunk;
+- int newChunk;
+- int markNAND;
+- int retVal = YAFFS_OK;
+- int cleanups = 0;
+- int i;
+- int isCheckpointBlock;
+- int matchingChunk;
+- int maxCopies;
+-
+- int chunksBefore = yaffs_GetErasedChunks(dev);
+- int chunksAfter;
+-
+- yaffs_ExtendedTags tags;
+-
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, block);
+-
+- yaffs_Object *object;
+-
+- isCheckpointBlock = (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT);
+-
+- bi->blockState = YAFFS_BLOCK_STATE_COLLECTING;
+-
+- T(YAFFS_TRACE_TRACING,
+- (TSTR("Collecting block %d, in use %d, shrink %d, wholeBlock %d" TENDSTR),
+- block,
+- bi->pagesInUse,
+- bi->hasShrinkHeader,
+- wholeBlock));
+-
+- /*yaffs_VerifyFreeChunks(dev); */
+-
+- bi->hasShrinkHeader = 0; /* clear the flag so that the block can erase */
+-
+- /* Take off the number of soft deleted entries because
+- * they're going to get really deleted during GC.
+- */
+- dev->nFreeChunks -= bi->softDeletions;
+-
+- dev->isDoingGC = 1;
+-
+- if (isCheckpointBlock ||
+- !yaffs_StillSomeChunkBits(dev, block)) {
+- T(YAFFS_TRACE_TRACING,
+- (TSTR
+- ("Collecting block %d that has no chunks in use" TENDSTR),
+- block));
+- yaffs_BlockBecameDirty(dev, block);
+- } else {
+-
+- __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
+-
+- yaffs_VerifyBlock(dev, bi, block);
+-
+- maxCopies = (wholeBlock) ? dev->nChunksPerBlock : 10;
+- oldChunk = block * dev->nChunksPerBlock + dev->gcChunk;
+-
+- for (/* init already done */;
+- retVal == YAFFS_OK &&
+- dev->gcChunk < dev->nChunksPerBlock &&
+- (bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) &&
+- maxCopies > 0;
+- dev->gcChunk++, oldChunk++) {
+- if (yaffs_CheckChunkBit(dev, block, dev->gcChunk)) {
+-
+- /* This page is in use and might need to be copied off */
+-
+- maxCopies--;
+-
+- markNAND = 1;
+-
+- yaffs_InitialiseTags(&tags);
+-
+- yaffs_ReadChunkWithTagsFromNAND(dev, oldChunk,
+- buffer, &tags);
+-
+- object =
+- yaffs_FindObjectByNumber(dev,
+- tags.objectId);
+-
+- T(YAFFS_TRACE_GC_DETAIL,
+- (TSTR
+- ("Collecting chunk in block %d, %d %d %d " TENDSTR),
+- dev->gcChunk, tags.objectId, tags.chunkId,
+- tags.byteCount));
+-
+- if (object && !yaffs_SkipVerification(dev)) {
+- if (tags.chunkId == 0)
+- matchingChunk = object->hdrChunk;
+- else if (object->softDeleted)
+- matchingChunk = oldChunk; /* Defeat the test */
+- else
+- matchingChunk = yaffs_FindChunkInFile(object, tags.chunkId, NULL);
+-
+- if (oldChunk != matchingChunk)
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("gc: page in gc mismatch: %d %d %d %d"TENDSTR),
+- oldChunk, matchingChunk, tags.objectId, tags.chunkId));
+-
+- }
+-
+- if (!object) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("page %d in gc has no object: %d %d %d "
+- TENDSTR), oldChunk,
+- tags.objectId, tags.chunkId, tags.byteCount));
+- }
+-
+- if (object &&
+- object->deleted &&
+- object->softDeleted &&
+- tags.chunkId != 0) {
+- /* Data chunk in a soft deleted file, throw it away
+- * It's a soft deleted data chunk,
+- * No need to copy this, just forget about it and
+- * fix up the object.
+- */
+-
+- object->nDataChunks--;
+-
+- if (object->nDataChunks <= 0) {
+-					/* remember to clean up the object */
+- dev->gcCleanupList[cleanups] =
+- tags.objectId;
+- cleanups++;
+- }
+- markNAND = 0;
+- } else if (0) {
+- /* Todo object && object->deleted && object->nDataChunks == 0 */
+- /* Deleted object header with no data chunks.
+- * Can be discarded and the file deleted.
+- */
+- object->hdrChunk = 0;
+- yaffs_FreeTnode(object->myDev,
+- object->variant.
+- fileVariant.top);
+- object->variant.fileVariant.top = NULL;
+- yaffs_DoGenericObjectDeletion(object);
+-
+- } else if (object) {
+- /* It's either a data chunk in a live file or
+- * an ObjectHeader, so we're interested in it.
+- * NB Need to keep the ObjectHeaders of deleted files
+- * until the whole file has been deleted off
+- */
+- tags.serialNumber++;
+-
+- dev->nGCCopies++;
+-
+- if (tags.chunkId == 0) {
+- /* It is an object Id,
+- * We need to nuke the shrinkheader flags first
+- * We no longer want the shrinkHeader flag since its work is done
+- * and if it is left in place it will mess up scanning.
+- */
+-
+- yaffs_ObjectHeader *oh;
+- oh = (yaffs_ObjectHeader *)buffer;
+- oh->isShrink = 0;
+- tags.extraIsShrinkHeader = 0;
+-
+- yaffs_VerifyObjectHeader(object, oh, &tags, 1);
+- }
+-
+- newChunk =
+- yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &tags, 1);
+-
+- if (newChunk < 0) {
+- retVal = YAFFS_FAIL;
+- } else {
+-
+- /* Ok, now fix up the Tnodes etc. */
+-
+- if (tags.chunkId == 0) {
+- /* It's a header */
+- object->hdrChunk = newChunk;
+- object->serial = tags.serialNumber;
+- } else {
+- /* It's a data chunk */
+- yaffs_PutChunkIntoFile
+- (object,
+- tags.chunkId,
+- newChunk, 0);
+- }
+- }
+- }
+-
+- if (retVal == YAFFS_OK)
+- yaffs_DeleteChunk(dev, oldChunk, markNAND, __LINE__);
+-
+- }
+- }
+-
+- yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+-
+-
+- /* Do any required cleanups */
+- for (i = 0; i < cleanups; i++) {
+- /* Time to delete the file too */
+- object =
+- yaffs_FindObjectByNumber(dev,
+- dev->gcCleanupList[i]);
+- if (object) {
+- yaffs_FreeTnode(dev,
+- object->variant.fileVariant.
+- top);
+- object->variant.fileVariant.top = NULL;
+- T(YAFFS_TRACE_GC,
+- (TSTR
+- ("yaffs: About to finally delete object %d"
+- TENDSTR), object->objectId));
+- yaffs_DoGenericObjectDeletion(object);
+- object->myDev->nDeletedFiles--;
+- }
+-
+- }
+-
+- }
+-
+- yaffs_VerifyCollectedBlock(dev, bi, block);
+-
+- chunksAfter = yaffs_GetErasedChunks(dev);
+- if (chunksBefore >= chunksAfter) {
+- T(YAFFS_TRACE_GC,
+- (TSTR
+- ("gc did not increase free chunks before %d after %d"
+- TENDSTR), chunksBefore, chunksAfter));
+- }
+-
+- /* If the gc completed then clear the current gcBlock so that we find another. */
+- if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING) {
+- dev->gcBlock = -1;
+- dev->gcChunk = 0;
+- }
+-
+- dev->isDoingGC = 0;
+-
+- return retVal;
+-}
+-
+-/* New garbage collector
+- * If we're very low on erased blocks then we do aggressive garbage collection
+- * otherwise we do "leisurely" garbage collection.
+- * Aggressive gc looks further (the whole array) and will accept less-dirty blocks.
+- * Passive gc only inspects smaller areas and will only accept dirtier blocks.
+- *
+- * The idea is to help clear out space in a more spread-out manner.
+- * Dunno if it really does anything useful.
+- */
+-static int yaffs_CheckGarbageCollection(yaffs_Device *dev)
+-{
+- int block;
+- int aggressive;
+- int gcOk = YAFFS_OK;
+- int maxTries = 0;
+-
+- int checkpointBlockAdjust;
+-
+- if (dev->isDoingGC) {
+- /* Bail out so we don't get recursive gc */
+- return YAFFS_OK;
+- }
+-
+- /* This loop should pass the first time.
+- * We'll only see looping here if the erase of the collected block fails.
+- */
+-
+- do {
+- maxTries++;
+-
+- checkpointBlockAdjust = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint;
+- if (checkpointBlockAdjust < 0)
+- checkpointBlockAdjust = 0;
+-
+- if (dev->nErasedBlocks < (dev->nReservedBlocks + checkpointBlockAdjust + 2)) {
+- /* We need a block soon...*/
+- aggressive = 1;
+- } else {
+- /* We're in no hurry */
+- aggressive = 0;
+- }
+-
+- if (dev->gcBlock <= 0) {
+- dev->gcBlock = yaffs_FindBlockForGarbageCollection(dev, aggressive);
+- dev->gcChunk = 0;
+- }
+-
+- block = dev->gcBlock;
+-
+- if (block > 0) {
+- dev->garbageCollections++;
+- if (!aggressive)
+- dev->passiveGarbageCollections++;
+-
+- T(YAFFS_TRACE_GC,
+- (TSTR
+- ("yaffs: GC erasedBlocks %d aggressive %d" TENDSTR),
+- dev->nErasedBlocks, aggressive));
+-
+- gcOk = yaffs_GarbageCollectBlock(dev, block, aggressive);
+- }
+-
+- if (dev->nErasedBlocks < (dev->nReservedBlocks) && block > 0) {
+- T(YAFFS_TRACE_GC,
+- (TSTR
+- ("yaffs: GC !!!no reclaim!!! erasedBlocks %d after try %d block %d"
+- TENDSTR), dev->nErasedBlocks, maxTries, block));
+- }
+- } while ((dev->nErasedBlocks < dev->nReservedBlocks) &&
+- (block > 0) &&
+- (maxTries < 2));
+-
+- return aggressive ? gcOk : YAFFS_OK;
+-}
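
The policy spelled out in the comment above boils down to a single threshold: once the count of erased blocks falls within two blocks of the reserve plus the shortfall of checkpoint blocks, collection turns aggressive. A small sketch of just that decision, with hypothetical field names standing in for the yaffs_Device counters:

/* Hypothetical snapshot of the counters the gc policy looks at. */
struct gc_state {
	int erased_blocks;	/* blocks currently erased and ready */
	int reserved_blocks;	/* configured reserve */
	int checkpt_needed;	/* blocks a checkpoint would need */
	int checkpt_held;	/* blocks already holding a checkpoint */
};

/* Returns 1 for aggressive gc, 0 for leisurely gc, mirroring the
 * threshold used in yaffs_CheckGarbageCollection above. */
int gc_should_be_aggressive(const struct gc_state *s)
{
	int checkpt_adjust = s->checkpt_needed - s->checkpt_held;

	if (checkpt_adjust < 0)
		checkpt_adjust = 0;

	/* We need a block "soon" once we are within 2 blocks of the floor. */
	return s->erased_blocks < (s->reserved_blocks + checkpt_adjust + 2);
}
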
+-
+-/*------------------------- TAGS --------------------------------*/
+-
+-static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId,
+- int chunkInObject)
+-{
+- return (tags->chunkId == chunkInObject &&
+- tags->objectId == objectId && !tags->chunkDeleted) ? 1 : 0;
+-
+-}
+-
+-
+-/*-------------------- Data file manipulation -----------------*/
+-
+-static int yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode,
+- yaffs_ExtendedTags *tags)
+-{
+- /*Get the Tnode, then get the level 0 offset chunk offset */
+- yaffs_Tnode *tn;
+- int theChunk = -1;
+- yaffs_ExtendedTags localTags;
+- int retVal = -1;
+-
+- yaffs_Device *dev = in->myDev;
+-
+- if (!tags) {
+- /* Passed a NULL, so use our own tags space */
+- tags = &localTags;
+- }
+-
+- tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
+-
+- if (tn) {
+- theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+-
+- retVal =
+- yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
+- chunkInInode);
+- }
+- return retVal;
+-}
+-
+-static int yaffs_FindAndDeleteChunkInFile(yaffs_Object *in, int chunkInInode,
+- yaffs_ExtendedTags *tags)
+-{
+- /* Get the Tnode, then get the level 0 offset chunk offset */
+- yaffs_Tnode *tn;
+- int theChunk = -1;
+- yaffs_ExtendedTags localTags;
+-
+- yaffs_Device *dev = in->myDev;
+- int retVal = -1;
+-
+- if (!tags) {
+- /* Passed a NULL, so use our own tags space */
+- tags = &localTags;
+- }
+-
+- tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
+-
+- if (tn) {
+-
+- theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+-
+- retVal =
+- yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
+- chunkInInode);
+-
+- /* Delete the entry in the filestructure (if found) */
+- if (retVal != -1)
+- yaffs_PutLevel0Tnode(dev, tn, chunkInInode, 0);
+- }
+-
+- return retVal;
+-}
+-
+-#ifdef YAFFS_PARANOID
+-
+-static int yaffs_CheckFileSanity(yaffs_Object *in)
+-{
+- int chunk;
+- int nChunks;
+- int fSize;
+- int failed = 0;
+- int objId;
+- yaffs_Tnode *tn;
+- yaffs_Tags localTags;
+- yaffs_Tags *tags = &localTags;
+- int theChunk;
+- int chunkDeleted;
+-
+- if (in->variantType != YAFFS_OBJECT_TYPE_FILE)
+- return YAFFS_FAIL;
+-
+- objId = in->objectId;
+- fSize = in->variant.fileVariant.fileSize;
+- nChunks =
+- (fSize + in->myDev->nDataBytesPerChunk - 1) / in->myDev->nDataBytesPerChunk;
+-
+- for (chunk = 1; chunk <= nChunks; chunk++) {
+- tn = yaffs_FindLevel0Tnode(in->myDev, &in->variant.fileVariant,
+- chunk);
+-
+- if (tn) {
+-
+- theChunk = yaffs_GetChunkGroupBase(dev, tn, chunk);
+-
+- if (yaffs_CheckChunkBits
+- (dev, theChunk / dev->nChunksPerBlock,
+- theChunk % dev->nChunksPerBlock)) {
+-
+- yaffs_ReadChunkTagsFromNAND(in->myDev, theChunk,
+- tags,
+- &chunkDeleted);
+- if (yaffs_TagsMatch
+- (tags, in->objectId, chunk, chunkDeleted)) {
+- /* found it; */
+-
+- }
+- } else {
+-
+- failed = 1;
+- }
+-
+- } else {
+- /* T(("No level 0 found for %d\n", chunk)); */
+- }
+- }
+-
+- return failed ? YAFFS_FAIL : YAFFS_OK;
+-}
+-
+-#endif
+-
+-static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode,
+- int chunkInNAND, int inScan)
+-{
+- /* NB inScan is zero unless scanning.
+- * For forward scanning, inScan is > 0;
+- * for backward scanning inScan is < 0
+- */
+-
+- yaffs_Tnode *tn;
+- yaffs_Device *dev = in->myDev;
+- int existingChunk;
+- yaffs_ExtendedTags existingTags;
+- yaffs_ExtendedTags newTags;
+- unsigned existingSerial, newSerial;
+-
+- if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
+- /* Just ignore an attempt at putting a chunk into a non-file during scanning
+- * If it is not during Scanning then something went wrong!
+- */
+- if (!inScan) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs tragedy:attempt to put data chunk into a non-file"
+- TENDSTR)));
+- YBUG();
+- }
+-
+- yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
+- return YAFFS_OK;
+- }
+-
+- tn = yaffs_AddOrFindLevel0Tnode(dev,
+- &in->variant.fileVariant,
+- chunkInInode,
+- NULL);
+- if (!tn)
+- return YAFFS_FAIL;
+-
+- existingChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+-
+- if (inScan != 0) {
+- /* If we're scanning then we need to test for duplicates
+- * NB This does not need to be efficient since it should only ever
+- * happen when the power fails during a write, then only one
+- * chunk should ever be affected.
+- *
+- * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO
+- * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
+- */
+-
+- if (existingChunk > 0) {
+- /* NB Right now existing chunk will not be real chunkId if the device >= 32MB
+- * thus we have to do a FindChunkInFile to get the real chunk id.
+- *
+- * We have a duplicate now we need to decide which one to use:
+- *
+- * Backwards scanning YAFFS2: The old one is what we use, dump the new one.
+- * Forward scanning YAFFS2: The new one is what we use, dump the old one.
+- * YAFFS1: Get both sets of tags and compare serial numbers.
+- */
+-
+- if (inScan > 0) {
+- /* Only do this for forward scanning */
+- yaffs_ReadChunkWithTagsFromNAND(dev,
+- chunkInNAND,
+- NULL, &newTags);
+-
+- /* Do a proper find */
+- existingChunk =
+- yaffs_FindChunkInFile(in, chunkInInode,
+- &existingTags);
+- }
+-
+- if (existingChunk <= 0) {
+- /*Hoosterman - how did this happen? */
+-
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs tragedy: existing chunk < 0 in scan"
+- TENDSTR)));
+-
+- }
+-
+- /* NB The deleted flags should be false, otherwise the chunks will
+- * not be loaded during a scan
+- */
+-
+- if (inScan > 0) {
+- newSerial = newTags.serialNumber;
+- existingSerial = existingTags.serialNumber;
+- }
+-
+- if ((inScan > 0) &&
+- (in->myDev->isYaffs2 ||
+- existingChunk <= 0 ||
+- ((existingSerial + 1) & 3) == newSerial)) {
+- /* Forward scanning.
+- * Use new
+- * Delete the old one and drop through to update the tnode
+- */
+- yaffs_DeleteChunk(dev, existingChunk, 1,
+- __LINE__);
+- } else {
+- /* Backward scanning or we want to use the existing one
+- * Use existing.
+- * Delete the new one and return early so that the tnode isn't changed
+- */
+- yaffs_DeleteChunk(dev, chunkInNAND, 1,
+- __LINE__);
+- return YAFFS_OK;
+- }
+- }
+-
+- }
+-
+- if (existingChunk == 0)
+- in->nDataChunks++;
+-
+- yaffs_PutLevel0Tnode(dev, tn, chunkInInode, chunkInNAND);
+-
+- return YAFFS_OK;
+-}
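
The scan-time duplicate handling above resolves two chunks claiming the same file position as follows: forward-scanned YAFFS2 always prefers the newer copy, backward scanning keeps the existing one, and YAFFS1 compares the 2-bit serial numbers. A sketch of the YAFFS1 comparison on its own; masking new_serial is an added safety assumption, the original compares it unmasked:

/* YAFFS1-style duplicate resolution: serial numbers are 2-bit counters,
 * so the newly scanned chunk wins when its serial is exactly old+1 modulo
 * 4.  Returns 1 if the new chunk should replace the existing one. */
int newer_by_serial(unsigned existing_serial, unsigned new_serial)
{
	return ((existing_serial + 1) & 3) == (new_serial & 3);
}
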
+-
+-static int yaffs_ReadChunkDataFromObject(yaffs_Object *in, int chunkInInode,
+- __u8 *buffer)
+-{
+- int chunkInNAND = yaffs_FindChunkInFile(in, chunkInInode, NULL);
+-
+- if (chunkInNAND >= 0)
+- return yaffs_ReadChunkWithTagsFromNAND(in->myDev, chunkInNAND,
+- buffer, NULL);
+- else {
+- T(YAFFS_TRACE_NANDACCESS,
+- (TSTR("Chunk %d not found zero instead" TENDSTR),
+- chunkInNAND));
+- /* get sane (zero) data if you read a hole */
+- memset(buffer, 0, in->myDev->nDataBytesPerChunk);
+- return 0;
+- }
+-
+-}
+-
+-void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn)
+-{
+- int block;
+- int page;
+- yaffs_ExtendedTags tags;
+- yaffs_BlockInfo *bi;
+-
+- if (chunkId <= 0)
+- return;
+-
+- dev->nDeletions++;
+- block = chunkId / dev->nChunksPerBlock;
+- page = chunkId % dev->nChunksPerBlock;
+-
+-
+- if (!yaffs_CheckChunkBit(dev, block, page))
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Deleting invalid chunk %d"TENDSTR),
+- chunkId));
+-
+- bi = yaffs_GetBlockInfo(dev, block);
+-
+- T(YAFFS_TRACE_DELETION,
+- (TSTR("line %d delete of chunk %d" TENDSTR), lyn, chunkId));
+-
+- if (markNAND &&
+- bi->blockState != YAFFS_BLOCK_STATE_COLLECTING && !dev->isYaffs2) {
+-
+- yaffs_InitialiseTags(&tags);
+-
+- tags.chunkDeleted = 1;
+-
+- yaffs_WriteChunkWithTagsToNAND(dev, chunkId, NULL, &tags);
+- yaffs_HandleUpdateChunk(dev, chunkId, &tags);
+- } else {
+- dev->nUnmarkedDeletions++;
+- }
+-
+- /* Pull out of the management area.
+- * If the whole block became dirty, this will kick off an erasure.
+- */
+- if (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING ||
+- bi->blockState == YAFFS_BLOCK_STATE_FULL ||
+- bi->blockState == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+- bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) {
+- dev->nFreeChunks++;
+-
+- yaffs_ClearChunkBit(dev, block, page);
+-
+- bi->pagesInUse--;
+-
+- if (bi->pagesInUse == 0 &&
+- !bi->hasShrinkHeader &&
+- bi->blockState != YAFFS_BLOCK_STATE_ALLOCATING &&
+- bi->blockState != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+- yaffs_BlockBecameDirty(dev, block);
+- }
+-
+- }
+-
+-}
+-
+-static int yaffs_WriteChunkDataToObject(yaffs_Object *in, int chunkInInode,
+- const __u8 *buffer, int nBytes,
+- int useReserve)
+-{
+- /* Find old chunk Need to do this to get serial number
+- * Write new one and patch into tree.
+- * Invalidate old tags.
+- */
+-
+- int prevChunkId;
+- yaffs_ExtendedTags prevTags;
+-
+- int newChunkId;
+- yaffs_ExtendedTags newTags;
+-
+- yaffs_Device *dev = in->myDev;
+-
+- yaffs_CheckGarbageCollection(dev);
+-
+- /* Get the previous chunk at this location in the file if it exists */
+- prevChunkId = yaffs_FindChunkInFile(in, chunkInInode, &prevTags);
+-
+- /* Set up new tags */
+- yaffs_InitialiseTags(&newTags);
+-
+- newTags.chunkId = chunkInInode;
+- newTags.objectId = in->objectId;
+- newTags.serialNumber =
+- (prevChunkId >= 0) ? prevTags.serialNumber + 1 : 1;
+- newTags.byteCount = nBytes;
+-
+- if (nBytes < 1 || nBytes > dev->totalBytesPerChunk) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("Writing %d bytes to chunk!!!!!!!!!" TENDSTR), nBytes));
+- YBUG();
+- }
+-
+- newChunkId =
+- yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
+- useReserve);
+-
+- if (newChunkId >= 0) {
+- yaffs_PutChunkIntoFile(in, chunkInInode, newChunkId, 0);
+-
+- if (prevChunkId >= 0)
+- yaffs_DeleteChunk(dev, prevChunkId, 1, __LINE__);
+-
+- yaffs_CheckFileSanity(in);
+- }
+- return newChunkId;
+-
+-}
+-
+-/* UpdateObjectHeader updates the header on NAND for an object.
+- * If name is not NULL, then that new name is used.
+- */
+-int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name, int force,
+- int isShrink, int shadows)
+-{
+-
+- yaffs_BlockInfo *bi;
+-
+- yaffs_Device *dev = in->myDev;
+-
+- int prevChunkId;
+- int retVal = 0;
+- int result = 0;
+-
+- int newChunkId;
+- yaffs_ExtendedTags newTags;
+- yaffs_ExtendedTags oldTags;
+-
+- __u8 *buffer = NULL;
+- YCHAR oldName[YAFFS_MAX_NAME_LENGTH + 1];
+-
+- yaffs_ObjectHeader *oh = NULL;
++ yaffs_obj_t *in;
++ YCHAR *str = NULL;
+
+- yaffs_strcpy(oldName, _Y("silly old name"));
++ yaffs_dev_t *dev = parent->my_dev;
+
++ /* Check if the entry exists. If it does then fail the call since we don't want a dup.*/
++ if (yaffs_find_by_name(parent, name))
++ return NULL;
+
+- if (!in->fake ||
+- in == dev->rootDir || /* The rootDir should also be saved */
+- force) {
++ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
++ str = yaffs_clone_str(aliasString);
++ if (!str)
++ return NULL;
++ }
+
+- yaffs_CheckGarbageCollection(dev);
+- yaffs_CheckObjectDetailsLoaded(in);
++ in = yaffs_new_obj(dev, -1, type);
+
+- buffer = yaffs_GetTempBuffer(in->myDev, __LINE__);
+- oh = (yaffs_ObjectHeader *) buffer;
++ if (!in){
++ if(str)
++ YFREE(str);
++ return NULL;
++ }
+
+- prevChunkId = in->hdrChunk;
+
+- if (prevChunkId > 0) {
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, prevChunkId,
+- buffer, &oldTags);
+
+- yaffs_VerifyObjectHeader(in, oh, &oldTags, 0);
+
+- memcpy(oldName, oh->name, sizeof(oh->name));
+- }
+
+- memset(buffer, 0xFF, dev->nDataBytesPerChunk);
++ if (in) {
++ in->hdr_chunk = 0;
++ in->valid = 1;
++ in->variant_type = type;
+
+- oh->type = in->variantType;
+- oh->yst_mode = in->yst_mode;
+- oh->shadowsObject = oh->inbandShadowsObject = shadows;
++ in->yst_mode = mode;
+
+ #ifdef CONFIG_YAFFS_WINCE
+- oh->win_atime[0] = in->win_atime[0];
+- oh->win_ctime[0] = in->win_ctime[0];
+- oh->win_mtime[0] = in->win_mtime[0];
+- oh->win_atime[1] = in->win_atime[1];
+- oh->win_ctime[1] = in->win_ctime[1];
+- oh->win_mtime[1] = in->win_mtime[1];
++ yfsd_win_file_time_now(in->win_atime);
++ in->win_ctime[0] = in->win_mtime[0] = in->win_atime[0];
++ in->win_ctime[1] = in->win_mtime[1] = in->win_atime[1];
++
+ #else
+- oh->yst_uid = in->yst_uid;
+- oh->yst_gid = in->yst_gid;
+- oh->yst_atime = in->yst_atime;
+- oh->yst_mtime = in->yst_mtime;
+- oh->yst_ctime = in->yst_ctime;
+- oh->yst_rdev = in->yst_rdev;
++ in->yst_atime = in->yst_mtime = in->yst_ctime = Y_CURRENT_TIME;
++
++ in->yst_rdev = rdev;
++ in->yst_uid = uid;
++ in->yst_gid = gid;
+ #endif
+- if (in->parent)
+- oh->parentObjectId = in->parent->objectId;
+- else
+- oh->parentObjectId = 0;
++ in->n_data_chunks = 0;
+
+- if (name && *name) {
+- memset(oh->name, 0, sizeof(oh->name));
+- yaffs_strncpy(oh->name, name, YAFFS_MAX_NAME_LENGTH);
+- } else if (prevChunkId >= 0)
+- memcpy(oh->name, oldName, sizeof(oh->name));
+- else
+- memset(oh->name, 0, sizeof(oh->name));
++ yaffs_set_obj_name(in, name);
++ in->dirty = 1;
+
+- oh->isShrink = isShrink;
++ yaffs_add_obj_to_dir(parent, in);
+
+- switch (in->variantType) {
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- /* Should not happen */
+- break;
+- case YAFFS_OBJECT_TYPE_FILE:
+- oh->fileSize =
+- (oh->parentObjectId == YAFFS_OBJECTID_DELETED
+- || oh->parentObjectId ==
+- YAFFS_OBJECTID_UNLINKED) ? 0 : in->variant.
+- fileVariant.fileSize;
++ in->my_dev = parent->my_dev;
++
++ switch (type) {
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ in->variant.symlink_variant.alias = str;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+- oh->equivalentObjectId =
+- in->variant.hardLinkVariant.equivalentObjectId;
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- /* Do nothing */
++ in->variant.hardlink_variant.equiv_obj =
++ equiv_obj;
++ in->variant.hardlink_variant.equiv_id =
++ equiv_obj->obj_id;
++ ylist_add(&in->hard_links, &equiv_obj->hard_links);
+ break;
++ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+- /* Do nothing */
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- yaffs_strncpy(oh->alias,
+- in->variant.symLinkVariant.alias,
+- YAFFS_MAX_ALIAS_LENGTH);
+- oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* do nothing */
+ break;
+ }
+
+- /* Tags */
+- yaffs_InitialiseTags(&newTags);
+- in->serial++;
+- newTags.chunkId = 0;
+- newTags.objectId = in->objectId;
+- newTags.serialNumber = in->serial;
+-
+- /* Add extra info for file header */
+-
+- newTags.extraHeaderInfoAvailable = 1;
+- newTags.extraParentObjectId = oh->parentObjectId;
+- newTags.extraFileLength = oh->fileSize;
+- newTags.extraIsShrinkHeader = oh->isShrink;
+- newTags.extraEquivalentObjectId = oh->equivalentObjectId;
+- newTags.extraShadows = (oh->shadowsObject > 0) ? 1 : 0;
+- newTags.extraObjectType = in->variantType;
+-
+- yaffs_VerifyObjectHeader(in, oh, &newTags, 1);
+-
+- /* Create new chunk in NAND */
+- newChunkId =
+- yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
+- (prevChunkId >= 0) ? 1 : 0);
+-
+- if (newChunkId >= 0) {
+-
+- in->hdrChunk = newChunkId;
+-
+- if (prevChunkId >= 0) {
+- yaffs_DeleteChunk(dev, prevChunkId, 1,
+- __LINE__);
+- }
+-
+- if (!yaffs_ObjectHasCachedWriteData(in))
+- in->dirty = 0;
+-
+- /* If this was a shrink, then mark the block that the chunk lives on */
+- if (isShrink) {
+- bi = yaffs_GetBlockInfo(in->myDev,
+- newChunkId / in->myDev->nChunksPerBlock);
+- bi->hasShrinkHeader = 1;
+- }
+-
++ if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
++ /* Could not create the object header, fail the creation */
++ yaffs_del_obj(in);
++ in = NULL;
+ }
+
+- retVal = newChunkId;
+-
++ yaffs_update_parent(parent);
+ }
+
+- if (buffer)
+- yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+-
+- return retVal;
++ return in;
+ }
+
+-/*------------------------ Short Operations Cache ----------------------------------------
+- * In many situations where there is no high level buffering (eg WinCE) a lot of
+- * reads might be short sequential reads, and a lot of writes may be short
+- * sequential writes. eg. scanning/writing a jpeg file.
+- * In these cases, a short read/write cache can provide a huge performance benefit
+- * with dumb-as-a-rock code.
+- * In Linux, the page cache provides read buffering and the short op cache provides write
+- * buffering.
+- *
+- * There are a limited number (~10) of cache chunks per device so that we don't
+- * need a very intelligent search.
+- */
+-
+-static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj)
++yaffs_obj_t *yaffs_create_file(yaffs_obj_t *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid)
+ {
+- yaffs_Device *dev = obj->myDev;
+- int i;
+- yaffs_ChunkCache *cache;
+- int nCaches = obj->myDev->nShortOpCaches;
+-
+- for (i = 0; i < nCaches; i++) {
+- cache = &dev->srCache[i];
+- if (cache->object == obj &&
+- cache->dirty)
+- return 1;
+- }
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
++ uid, gid, NULL, NULL, 0);
++}
+
+- return 0;
++yaffs_obj_t *yaffs_create_dir(yaffs_obj_t *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
++ mode, uid, gid, NULL, NULL, 0);
+ }
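
The short operations cache described above is deliberately simple: roughly ten entries per device, located by a linear search keyed on (object, chunk id). A minimal sketch of that lookup, using invented names (chunk_cache, cache_find) rather than the real yaffs_ChunkCache API:

#include <stddef.h>

/* Hypothetical entry modelled on yaffs_ChunkCache: one cached chunk of
 * one object, plus dirty and usage bookkeeping. */
struct chunk_cache {
	void *object;	/* owning object, NULL when the slot is free */
	int chunk_id;
	int dirty;
	int last_use;
};

/* With only ~10 slots, a linear scan is all the "index" that is needed. */
struct chunk_cache *cache_find(struct chunk_cache *slots, int n_slots,
			       const void *object, int chunk_id)
{
	int i;

	for (i = 0; i < n_slots; i++) {
		if (slots[i].object == object && slots[i].chunk_id == chunk_id)
			return &slots[i];
	}
	return NULL;
}
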
+
++yaffs_obj_t *yaffs_create_special(yaffs_obj_t *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid, __u32 rdev)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
++ uid, gid, NULL, NULL, rdev);
++}
+
+-static void yaffs_FlushFilesChunkCache(yaffs_Object *obj)
++yaffs_obj_t *yaffs_create_symlink(yaffs_obj_t *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid,
++ const YCHAR *alias)
+ {
+- yaffs_Device *dev = obj->myDev;
+- int lowest = -99; /* Stop compiler whining. */
+- int i;
+- yaffs_ChunkCache *cache;
+- int chunkWritten = 0;
+- int nCaches = obj->myDev->nShortOpCaches;
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
++ uid, gid, NULL, alias, 0);
++}
+
+- if (nCaches > 0) {
+- do {
+- cache = NULL;
++/* yaffs_link_obj returns the object id of the equivalent object.*/
++yaffs_obj_t *yaffs_link_obj(yaffs_obj_t *parent, const YCHAR *name,
++ yaffs_obj_t *equiv_obj)
++{
++ /* Get the real object in case we were fed a hard link as an equivalent object */
++ equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
+
+- /* Find the dirty cache for this object with the lowest chunk id. */
+- for (i = 0; i < nCaches; i++) {
+- if (dev->srCache[i].object == obj &&
+- dev->srCache[i].dirty) {
+- if (!cache
+- || dev->srCache[i].chunkId <
+- lowest) {
+- cache = &dev->srCache[i];
+- lowest = cache->chunkId;
+- }
+- }
+- }
++ if (yaffs_create_obj
++ (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
++ equiv_obj, NULL, 0)) {
++ return equiv_obj;
++ } else {
++ return NULL;
++ }
+
+- if (cache && !cache->locked) {
+- /* Write it out and free it up */
++}
+
+- chunkWritten =
+- yaffs_WriteChunkDataToObject(cache->object,
+- cache->chunkId,
+- cache->data,
+- cache->nBytes,
+- 1);
+- cache->dirty = 0;
+- cache->object = NULL;
+- }
++static int yaffs_change_obj_name(yaffs_obj_t *obj, yaffs_obj_t *new_dir,
++ const YCHAR *new_name, int force, int shadows)
++{
++ int unlinkOp;
++ int deleteOp;
+
+- } while (cache && chunkWritten > 0);
++ yaffs_obj_t *existingTarget;
+
+- if (cache) {
+- /* Hoosterman, disk full while writing cache out. */
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs tragedy: no space during cache write" TENDSTR)));
++ if (new_dir == NULL)
++ new_dir = obj->parent; /* use the old directory */
+
+- }
++ if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR
++ ("tragedy: yaffs_change_obj_name: new_dir is not a directory"
++ TENDSTR)));
++ YBUG();
+ }
+
+-}
++ /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
++ if (obj->my_dev->param.is_yaffs2)
++ unlinkOp = (new_dir == obj->my_dev->unlinked_dir);
++ else
++ unlinkOp = (new_dir == obj->my_dev->unlinked_dir
++ && obj->variant_type == YAFFS_OBJECT_TYPE_FILE);
+
+-/*yaffs_FlushEntireDeviceCache(dev)
+- *
+- *
+- */
++ deleteOp = (new_dir == obj->my_dev->del_dir);
+
+-void yaffs_FlushEntireDeviceCache(yaffs_Device *dev)
+-{
+- yaffs_Object *obj;
+- int nCaches = dev->nShortOpCaches;
+- int i;
++ existingTarget = yaffs_find_by_name(new_dir, new_name);
+
+- /* Find a dirty object in the cache and flush it...
+- * until there are no further dirty objects.
++ /* If the object is a file going into the unlinked directory,
++ * then it is OK to just stuff it in since duplicate names are allowed.
++ * else only proceed if the new name does not exist and if we're putting
++ * it into a directory.
+ */
+- do {
+- obj = NULL;
+- for (i = 0; i < nCaches && !obj; i++) {
+- if (dev->srCache[i].object &&
+- dev->srCache[i].dirty)
+- obj = dev->srCache[i].object;
+-
+- }
+- if (obj)
+- yaffs_FlushFilesChunkCache(obj);
+-
+- } while (obj);
+-
+-}
++ if ((unlinkOp ||
++ deleteOp ||
++ force ||
++ (shadows > 0) ||
++ !existingTarget) &&
++ new_dir->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_set_obj_name(obj, new_name);
++ obj->dirty = 1;
+
++ yaffs_add_obj_to_dir(new_dir, obj);
+
+-/* Grab us a cache chunk for use.
+- * First look for an empty one.
+- * Then look for the least recently used non-dirty one.
+- * Then look for the least recently used dirty one...., flush and look again.
+- */
+-static yaffs_ChunkCache *yaffs_GrabChunkCacheWorker(yaffs_Device *dev)
+-{
+- int i;
++ if (unlinkOp)
++ obj->unlinked = 1;
+
+- if (dev->nShortOpCaches > 0) {
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (!dev->srCache[i].object)
+- return &dev->srCache[i];
+- }
++ /* If it is a deletion then we mark it as a shrink for gc purposes. */
++ if (yaffs_update_oh(obj, new_name, 0, deleteOp, shadows, NULL) >= 0)
++ return YAFFS_OK;
+ }
+
+- return NULL;
++ return YAFFS_FAIL;
+ }
+
+-static yaffs_ChunkCache *yaffs_GrabChunkCache(yaffs_Device *dev)
++int yaffs_rename_obj(yaffs_obj_t *old_dir, const YCHAR *old_name,
++ yaffs_obj_t *new_dir, const YCHAR *new_name)
+ {
+- yaffs_ChunkCache *cache;
+- yaffs_Object *theObj;
+- int usage;
+- int i;
+- int pushout;
+-
+- if (dev->nShortOpCaches > 0) {
+- /* Try find a non-dirty one... */
+-
+- cache = yaffs_GrabChunkCacheWorker(dev);
++ yaffs_obj_t *obj = NULL;
++ yaffs_obj_t *existingTarget = NULL;
++ int force = 0;
++ int result;
++ yaffs_dev_t *dev;
+
+- if (!cache) {
+-			/* They were all dirty, so find the least recently used object and flush
+-			 * its cache, then look again.
+-			 * NB what's here is not very accurate: we actually flush the object
+-			 * that owns the least recently used page.
+- */
+
+- /* With locking we can't assume we can use entry zero */
++ if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ YBUG();
++ if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ YBUG();
+
+- theObj = NULL;
+- usage = -1;
+- cache = NULL;
+- pushout = -1;
++ dev = old_dir->my_dev;
+
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].object &&
+- !dev->srCache[i].locked &&
+- (dev->srCache[i].lastUse < usage || !cache)) {
+- usage = dev->srCache[i].lastUse;
+- theObj = dev->srCache[i].object;
+- cache = &dev->srCache[i];
+- pushout = i;
+- }
+- }
++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
++	/* Special case for case insensitive systems (eg. WinCE).
++ * While look-up is case insensitive, the name isn't.
++ * Therefore we might want to change x.txt to X.txt
++ */
++ if (old_dir == new_dir && yaffs_strcmp(old_name, new_name) == 0)
++ force = 1;
++#endif
+
+- if (!cache || cache->dirty) {
+- /* Flush and try again */
+- yaffs_FlushFilesChunkCache(theObj);
+- cache = yaffs_GrabChunkCacheWorker(dev);
+- }
++ if(yaffs_strnlen(new_name,YAFFS_MAX_NAME_LENGTH+1) > YAFFS_MAX_NAME_LENGTH)
++ /* ENAMETOOLONG */
++ return YAFFS_FAIL;
+
+- }
+- return cache;
+- } else
+- return NULL;
++ obj = yaffs_find_by_name(old_dir, old_name);
+
+-}
++ if (obj && obj->rename_allowed) {
+
+-/* Find a cached chunk */
+-static yaffs_ChunkCache *yaffs_FindChunkCache(const yaffs_Object *obj,
+- int chunkId)
+-{
+- yaffs_Device *dev = obj->myDev;
+- int i;
+- if (dev->nShortOpCaches > 0) {
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].object == obj &&
+- dev->srCache[i].chunkId == chunkId) {
+- dev->cacheHits++;
++ /* Now do the handling for an existing target, if there is one */
+
+- return &dev->srCache[i];
+- }
++ existingTarget = yaffs_find_by_name(new_dir, new_name);
++ if (existingTarget &&
++ existingTarget->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ !ylist_empty(&existingTarget->variant.dir_variant.children)) {
++ /* There is a target that is a non-empty directory, so we fail */
++ return YAFFS_FAIL; /* EEXIST or ENOTEMPTY */
++ } else if (existingTarget && existingTarget != obj) {
++ /* Nuke the target first, using shadowing,
++ * but only if it isn't the same object.
++ *
++ * Note we must disable gc otherwise it can mess up the shadowing.
++ *
++ */
++ dev->gc_disable=1;
++ yaffs_change_obj_name(obj, new_dir, new_name, force,
++ existingTarget->obj_id);
++ existingTarget->is_shadowed = 1;
++ yaffs_unlink_obj(existingTarget);
++ dev->gc_disable=0;
+ }
++
++ result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
++
++ yaffs_update_parent(old_dir);
++ if(new_dir != old_dir)
++ yaffs_update_parent(new_dir);
++
++ return result;
+ }
+- return NULL;
++ return YAFFS_FAIL;
+ }
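
The cache-grab path described a little further up (yaffs_GrabChunkCache and its worker) first looks for a free slot, and only when every slot is occupied does it pick the least recently used unlocked slot as the flush victim. A sketch of that choice under an assumed slot layout with hypothetical names:

#include <stddef.h>

/* Hypothetical slot layout, as in the lookup sketch above. */
struct cache_slot {
	void *object;	/* NULL means the slot is free */
	int locked;
	int dirty;
	int last_use;	/* bumped on every hit; smaller means older */
};

/* Grab a slot: prefer a free one; otherwise pick the least recently used
 * unlocked slot as the flush victim.  Returns NULL only if every slot is
 * locked (the caller would flush and retry, as yaffs_GrabChunkCache does). */
struct cache_slot *cache_grab(struct cache_slot *slots, int n_slots)
{
	struct cache_slot *victim = NULL;
	int i;

	for (i = 0; i < n_slots; i++) {
		if (!slots[i].object)
			return &slots[i];	/* free slot: done */
		if (!slots[i].locked &&
		    (!victim || slots[i].last_use < victim->last_use))
			victim = &slots[i];	/* oldest unlocked so far */
	}
	return victim;	/* caller flushes it if dirty, then reuses it */
}
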
+
+-/* Mark the chunk for the least recently used algorithm */
+-static void yaffs_UseChunkCache(yaffs_Device *dev, yaffs_ChunkCache *cache,
+- int isAWrite)
++/*------------------------- Block Management and Page Allocation ----------------*/
++
++static int yaffs_init_blocks(yaffs_dev_t *dev)
+ {
++ int nBlocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+- if (dev->nShortOpCaches > 0) {
+- if (dev->srLastUse < 0 || dev->srLastUse > 100000000) {
+- /* Reset the cache usages */
+- int i;
+- for (i = 1; i < dev->nShortOpCaches; i++)
+- dev->srCache[i].lastUse = 0;
++ dev->block_info = NULL;
++ dev->chunk_bits = NULL;
+
+- dev->srLastUse = 0;
+- }
++ dev->alloc_block = -1; /* force it to get a new one */
+
+- dev->srLastUse++;
++	/* If the first allocation strategy fails, try the alternate one */
++ dev->block_info = YMALLOC(nBlocks * sizeof(yaffs_block_info_t));
++ if (!dev->block_info) {
++ dev->block_info = YMALLOC_ALT(nBlocks * sizeof(yaffs_block_info_t));
++ dev->block_info_alt = 1;
++ } else
++ dev->block_info_alt = 0;
+
+- cache->lastUse = dev->srLastUse;
++ if (dev->block_info) {
++ /* Set up dynamic blockinfo stuff. */
++ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8; /* round up bytes */
++ dev->chunk_bits = YMALLOC(dev->chunk_bit_stride * nBlocks);
++ if (!dev->chunk_bits) {
++ dev->chunk_bits = YMALLOC_ALT(dev->chunk_bit_stride * nBlocks);
++ dev->chunk_bits_alt = 1;
++ } else
++ dev->chunk_bits_alt = 0;
++ }
+
+- if (isAWrite)
+- cache->dirty = 1;
++ if (dev->block_info && dev->chunk_bits) {
++ memset(dev->block_info, 0, nBlocks * sizeof(yaffs_block_info_t));
++ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * nBlocks);
++ return YAFFS_OK;
+ }
++
++ return YAFFS_FAIL;
+ }
+
+-/* Invalidate a single cache page.
+- * Do this when a whole page gets written,
+- * ie the short cache for this page is no longer valid.
+- */
+-static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId)
++static void yaffs_deinit_blocks(yaffs_dev_t *dev)
+ {
+- if (object->myDev->nShortOpCaches > 0) {
+- yaffs_ChunkCache *cache = yaffs_FindChunkCache(object, chunkId);
++ if (dev->block_info_alt && dev->block_info)
++ YFREE_ALT(dev->block_info);
++ else if (dev->block_info)
++ YFREE(dev->block_info);
+
+- if (cache)
+- cache->object = NULL;
+- }
+-}
++ dev->block_info_alt = 0;
+
+-/* Invalidate all the cache pages associated with this object
+- * Do this whenever the file is deleted or resized.
+- */
+-static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in)
+-{
+- int i;
+- yaffs_Device *dev = in->myDev;
++ dev->block_info = NULL;
+
+- if (dev->nShortOpCaches > 0) {
+- /* Invalidate it. */
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].object == in)
+- dev->srCache[i].object = NULL;
+- }
+- }
++ if (dev->chunk_bits_alt && dev->chunk_bits)
++ YFREE_ALT(dev->chunk_bits);
++ else if (dev->chunk_bits)
++ YFREE(dev->chunk_bits);
++ dev->chunk_bits_alt = 0;
++ dev->chunk_bits = NULL;
+ }
+
+-/*--------------------- Checkpointing --------------------*/
++void yaffs_block_became_dirty(yaffs_dev_t *dev, int block_no)
++{
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, block_no);
+
++ int erasedOk = 0;
+
+-static int yaffs_WriteCheckpointValidityMarker(yaffs_Device *dev, int head)
+-{
+- yaffs_CheckpointValidity cp;
++ /* If the block is still healthy erase it and mark as clean.
++ * If the block has had a data failure, then retire it.
++ */
+
+- memset(&cp, 0, sizeof(cp));
++ T(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
++ (TSTR("yaffs_block_became_dirty block %d state %d %s"TENDSTR),
++ block_no, bi->block_state, (bi->needs_retiring) ? "needs retiring" : ""));
+
+- cp.structType = sizeof(cp);
+- cp.magic = YAFFS_MAGIC;
+- cp.version = YAFFS_CHECKPOINT_VERSION;
+- cp.head = (head) ? 1 : 0;
++ yaffs2_clear_oldest_dirty_seq(dev,bi);
+
+- return (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp)) ?
+- 1 : 0;
+-}
++ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+-static int yaffs_ReadCheckpointValidityMarker(yaffs_Device *dev, int head)
+-{
+- yaffs_CheckpointValidity cp;
+- int ok;
++ /* If this is the block being garbage collected then stop gc'ing this block */
++ if(block_no == dev->gc_block)
++ dev->gc_block = 0;
+
+- ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
++ /* If this block is currently the best candidate for gc then drop as a candidate */
++ if(block_no == dev->gc_dirtiest){
++ dev->gc_dirtiest = 0;
++ dev->gc_pages_in_use = 0;
++ }
+
+- if (ok)
+- ok = (cp.structType == sizeof(cp)) &&
+- (cp.magic == YAFFS_MAGIC) &&
+- (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+- (cp.head == ((head) ? 1 : 0));
+- return ok ? 1 : 0;
+-}
++ if (!bi->needs_retiring) {
++ yaffs2_checkpt_invalidate(dev);
++ erasedOk = yaffs_erase_block(dev, block_no);
++ if (!erasedOk) {
++ dev->n_erase_failures++;
++ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("**>> Erasure failed %d" TENDSTR), block_no));
++ }
++ }
+
+-static void yaffs_DeviceToCheckpointDevice(yaffs_CheckpointDevice *cp,
+- yaffs_Device *dev)
+-{
+- cp->nErasedBlocks = dev->nErasedBlocks;
+- cp->allocationBlock = dev->allocationBlock;
+- cp->allocationPage = dev->allocationPage;
+- cp->nFreeChunks = dev->nFreeChunks;
++ if (erasedOk &&
++ ((yaffs_trace_mask & YAFFS_TRACE_ERASE) || !yaffs_skip_verification(dev))) {
++ int i;
++ for (i = 0; i < dev->param.chunks_per_block; i++) {
++ if (!yaffs_check_chunk_erased
++ (dev, block_no * dev->param.chunks_per_block + i)) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ (">>Block %d erasure supposedly OK, but chunk %d not erased"
++ TENDSTR), block_no, i));
++ }
++ }
++ }
++
++ if (erasedOk) {
++ /* Clean it up... */
++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++ bi->seq_number = 0;
++ dev->n_erased_blocks++;
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++ bi->has_shrink_hdr = 0;
++ bi->skip_erased_check = 1; /* This is clean, so no need to check */
++ bi->gc_prioritise = 0;
++ yaffs_clear_chunk_bits(dev, block_no);
+
+- cp->nDeletedFiles = dev->nDeletedFiles;
+- cp->nUnlinkedFiles = dev->nUnlinkedFiles;
+- cp->nBackgroundDeletions = dev->nBackgroundDeletions;
+- cp->sequenceNumber = dev->sequenceNumber;
+- cp->oldestDirtySequence = dev->oldestDirtySequence;
++ T(YAFFS_TRACE_ERASE,
++ (TSTR("Erased block %d" TENDSTR), block_no));
++ } else {
++ dev->n_free_chunks -= dev->param.chunks_per_block; /* We lost a block of free space */
+
++ yaffs_retire_block(dev, block_no);
++ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("**>> Block %d retired" TENDSTR), block_no));
++ }
+ }
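
A checkpoint stream is book-ended by small validity records carrying the structure size, a magic value, a version and a head/tail flag, and the reader above only trusts the stream when all four fields match. A round-trip sketch of that marker; the constants are placeholders, not the real YAFFS_MAGIC or checkpoint version:

#include <string.h>

/* Hypothetical constants standing in for YAFFS_MAGIC and the version. */
#define CKPT_MAGIC   0x59414653u
#define CKPT_VERSION 4

struct ckpt_validity {
	unsigned struct_type;	/* sizeof(self), catches layout drift */
	unsigned magic;
	unsigned version;
	unsigned head;		/* 1 at the start of the stream, 0 at the end */
};

void ckpt_validity_init(struct ckpt_validity *cp, int head)
{
	memset(cp, 0, sizeof(*cp));
	cp->struct_type = sizeof(*cp);
	cp->magic = CKPT_MAGIC;
	cp->version = CKPT_VERSION;
	cp->head = head ? 1 : 0;
}

/* Returns 1 only when every field matches what the writer put down. */
int ckpt_validity_ok(const struct ckpt_validity *cp, int head)
{
	return cp->struct_type == sizeof(*cp) &&
	       cp->magic == CKPT_MAGIC &&
	       cp->version == CKPT_VERSION &&
	       cp->head == (unsigned)(head ? 1 : 0);
}
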
+
+-static void yaffs_CheckpointDeviceToDevice(yaffs_Device *dev,
+- yaffs_CheckpointDevice *cp)
++static int yaffs_find_alloc_block(yaffs_dev_t *dev)
+ {
+- dev->nErasedBlocks = cp->nErasedBlocks;
+- dev->allocationBlock = cp->allocationBlock;
+- dev->allocationPage = cp->allocationPage;
+- dev->nFreeChunks = cp->nFreeChunks;
+-
+- dev->nDeletedFiles = cp->nDeletedFiles;
+- dev->nUnlinkedFiles = cp->nUnlinkedFiles;
+- dev->nBackgroundDeletions = cp->nBackgroundDeletions;
+- dev->sequenceNumber = cp->sequenceNumber;
+- dev->oldestDirtySequence = cp->oldestDirtySequence;
+-}
++ int i;
+
++ yaffs_block_info_t *bi;
+
+-static int yaffs_WriteCheckpointDevice(yaffs_Device *dev)
+-{
+- yaffs_CheckpointDevice cp;
+- __u32 nBytes;
+- __u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1);
++ if (dev->n_erased_blocks < 1) {
++ /* Hoosterman we've got a problem.
++ * Can't get space to gc
++ */
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("yaffs tragedy: no more erased blocks" TENDSTR)));
+
+- int ok;
++ return -1;
++ }
+
+- /* Write device runtime values*/
+- yaffs_DeviceToCheckpointDevice(&cp, dev);
+- cp.structType = sizeof(cp);
++ /* Find an empty block. */
+
+- ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ dev->alloc_block_finder++;
++ if (dev->alloc_block_finder < dev->internal_start_block
++ || dev->alloc_block_finder > dev->internal_end_block) {
++ dev->alloc_block_finder = dev->internal_start_block;
++ }
+
+- /* Write block info */
+- if (ok) {
+- nBytes = nBlocks * sizeof(yaffs_BlockInfo);
+- ok = (yaffs_CheckpointWrite(dev, dev->blockInfo, nBytes) == nBytes);
++ bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->seq_number++;
++ bi->seq_number = dev->seq_number;
++ dev->n_erased_blocks--;
++ T(YAFFS_TRACE_ALLOCATE,
++ (TSTR("Allocated block %d, seq %d, %d left" TENDSTR),
++ dev->alloc_block_finder, dev->seq_number,
++ dev->n_erased_blocks));
++ return dev->alloc_block_finder;
++ }
+ }
+
+- /* Write chunk bits */
+- if (ok) {
+- nBytes = nBlocks * dev->chunkBitmapStride;
+- ok = (yaffs_CheckpointWrite(dev, dev->chunkBits, nBytes) == nBytes);
+- }
+- return ok ? 1 : 0;
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR
++ ("yaffs tragedy: no more erased blocks, but there should have been %d"
++ TENDSTR), dev->n_erased_blocks));
+
++ return -1;
+ }
+
+-static int yaffs_ReadCheckpointDevice(yaffs_Device *dev)
++
++/*
++ * Check if there's space to allocate...
++ * Thinks.... do we need to make this the same as yaffs_get_free_chunks()?
++ */
++int yaffs_check_alloc_available(yaffs_dev_t *dev, int n_chunks)
+ {
+- yaffs_CheckpointDevice cp;
+- __u32 nBytes;
+- __u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1);
++ int reservedChunks;
++ int reservedBlocks = dev->param.n_reserved_blocks;
++ int checkpointBlocks;
+
+- int ok;
++ checkpointBlocks = yaffs_calc_checkpt_blocks_required(dev);
+
+- ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
+- if (!ok)
+- return 0;
++ reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->param.chunks_per_block);
+
+- if (cp.structType != sizeof(cp))
+- return 0;
++ return (dev->n_free_chunks > (reservedChunks + n_chunks));
++}
++
++static int yaffs_alloc_chunk(yaffs_dev_t *dev, int useReserve,
++ yaffs_block_info_t **blockUsedPtr)
++{
++ int retVal;
++ yaffs_block_info_t *bi;
++
++ if (dev->alloc_block < 0) {
++ /* Get next block to allocate off */
++ dev->alloc_block = yaffs_find_alloc_block(dev);
++ dev->alloc_page = 0;
++ }
+
++ if (!useReserve && !yaffs_check_alloc_available(dev, 1)) {
++ /* Not enough space to allocate unless we're allowed to use the reserve. */
++ return -1;
++ }
+
+- yaffs_CheckpointDeviceToDevice(dev, &cp);
++ if (dev->n_erased_blocks < dev->param.n_reserved_blocks
++ && dev->alloc_page == 0) {
++ T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR)));
++ }
+
+- nBytes = nBlocks * sizeof(yaffs_BlockInfo);
++ /* Next page please.... */
++ if (dev->alloc_block >= 0) {
++ bi = yaffs_get_block_info(dev, dev->alloc_block);
+
+- ok = (yaffs_CheckpointRead(dev, dev->blockInfo, nBytes) == nBytes);
++ retVal = (dev->alloc_block * dev->param.chunks_per_block) +
++ dev->alloc_page;
++ bi->pages_in_use++;
++ yaffs_set_chunk_bit(dev, dev->alloc_block,
++ dev->alloc_page);
+
+- if (!ok)
+- return 0;
+- nBytes = nBlocks * dev->chunkBitmapStride;
++ dev->alloc_page++;
+
+- ok = (yaffs_CheckpointRead(dev, dev->chunkBits, nBytes) == nBytes);
++ dev->n_free_chunks--;
+
+- return ok ? 1 : 0;
+-}
++ /* If the block is full set the state to full */
++ if (dev->alloc_page >= dev->param.chunks_per_block) {
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
++ }
+
+-static void yaffs_ObjectToCheckpointObject(yaffs_CheckpointObject *cp,
+- yaffs_Object *obj)
+-{
++ if (blockUsedPtr)
++ *blockUsedPtr = bi;
++
++ return retVal;
++ }
+
+- cp->objectId = obj->objectId;
+- cp->parentId = (obj->parent) ? obj->parent->objectId : 0;
+- cp->hdrChunk = obj->hdrChunk;
+- cp->variantType = obj->variantType;
+- cp->deleted = obj->deleted;
+- cp->softDeleted = obj->softDeleted;
+- cp->unlinked = obj->unlinked;
+- cp->fake = obj->fake;
+- cp->renameAllowed = obj->renameAllowed;
+- cp->unlinkAllowed = obj->unlinkAllowed;
+- cp->serial = obj->serial;
+- cp->nDataChunks = obj->nDataChunks;
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR)));
+
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+- cp->fileSizeOrEquivalentObjectId = obj->variant.fileVariant.fileSize;
+- else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
+- cp->fileSizeOrEquivalentObjectId = obj->variant.hardLinkVariant.equivalentObjectId;
++ return -1;
+ }
+
+-static int yaffs_CheckpointObjectToObject(yaffs_Object *obj, yaffs_CheckpointObject *cp)
++static int yaffs_get_erased_chunks(yaffs_dev_t *dev)
+ {
++ int n;
+
+- yaffs_Object *parent;
++ n = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+- if (obj->variantType != cp->variantType) {
+- T(YAFFS_TRACE_ERROR, (TSTR("Checkpoint read object %d type %d "
+- TCONT("chunk %d does not match existing object type %d")
+- TENDSTR), cp->objectId, cp->variantType, cp->hdrChunk,
+- obj->variantType));
+- return 0;
+- }
++ if (dev->alloc_block > 0)
++ n += (dev->param.chunks_per_block - dev->alloc_page);
+
+- obj->objectId = cp->objectId;
++ return n;
+
+- if (cp->parentId)
+- parent = yaffs_FindOrCreateObjectByNumber(
+- obj->myDev,
+- cp->parentId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
+- else
+- parent = NULL;
++}
+
+- if (parent) {
+- if (parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Checkpoint read object %d parent %d type %d"
+- TCONT(" chunk %d Parent type, %d, not directory")
+- TENDSTR),
+- cp->objectId, cp->parentId, cp->variantType,
+- cp->hdrChunk, parent->variantType));
+- return 0;
++/*
++ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
++ * if we don't want to write to it.
++ */
++void yaffs_skip_rest_of_block(yaffs_dev_t *dev)
++{
++ if(dev->alloc_block > 0){
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, dev->alloc_block);
++ if(bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING){
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
+ }
+- yaffs_AddObjectToDirectory(parent, obj);
+ }
+-
+- obj->hdrChunk = cp->hdrChunk;
+- obj->variantType = cp->variantType;
+- obj->deleted = cp->deleted;
+- obj->softDeleted = cp->softDeleted;
+- obj->unlinked = cp->unlinked;
+- obj->fake = cp->fake;
+- obj->renameAllowed = cp->renameAllowed;
+- obj->unlinkAllowed = cp->unlinkAllowed;
+- obj->serial = cp->serial;
+- obj->nDataChunks = cp->nDataChunks;
+-
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+- obj->variant.fileVariant.fileSize = cp->fileSizeOrEquivalentObjectId;
+- else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
+- obj->variant.hardLinkVariant.equivalentObjectId = cp->fileSizeOrEquivalentObjectId;
+-
+- if (obj->hdrChunk > 0)
+- obj->lazyLoaded = 1;
+- return 1;
+ }
+
+
+-
+-static int yaffs_CheckpointTnodeWorker(yaffs_Object *in, yaffs_Tnode *tn,
+- __u32 level, int chunkOffset)
++static int yaffs_gc_block(yaffs_dev_t *dev, int block,
++ int wholeBlock)
+ {
++ int oldChunk;
++ int newChunk;
++ int mark_flash;
++ int retVal = YAFFS_OK;
+ int i;
+- yaffs_Device *dev = in->myDev;
+- int ok = 1;
+- int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+-
+- if (tnodeSize < sizeof(yaffs_Tnode))
+- tnodeSize = sizeof(yaffs_Tnode);
++ int isCheckpointBlock;
++ int matchingChunk;
++ int maxCopies;
+
++ int chunksBefore = yaffs_get_erased_chunks(dev);
++ int chunksAfter;
+
+- if (tn) {
+- if (level > 0) {
++ yaffs_ext_tags tags;
+
+- for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+- if (tn->internal[i]) {
+- ok = yaffs_CheckpointTnodeWorker(in,
+- tn->internal[i],
+- level - 1,
+- (chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i);
+- }
+- }
+- } else if (level == 0) {
+- __u32 baseOffset = chunkOffset << YAFFS_TNODES_LEVEL0_BITS;
+- ok = (yaffs_CheckpointWrite(dev, &baseOffset, sizeof(baseOffset)) == sizeof(baseOffset));
+- if (ok)
+- ok = (yaffs_CheckpointWrite(dev, tn, tnodeSize) == tnodeSize);
+- }
+- }
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, block);
+
+- return ok;
++ yaffs_obj_t *object;
+
+-}
++ isCheckpointBlock = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+-static int yaffs_WriteCheckpointTnodes(yaffs_Object *obj)
+-{
+- __u32 endMarker = ~0;
+- int ok = 1;
+
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
+- ok = yaffs_CheckpointTnodeWorker(obj,
+- obj->variant.fileVariant.top,
+- obj->variant.fileVariant.topLevel,
+- 0);
+- if (ok)
+- ok = (yaffs_CheckpointWrite(obj->myDev, &endMarker, sizeof(endMarker)) ==
+- sizeof(endMarker));
+- }
++ T(YAFFS_TRACE_TRACING,
++ (TSTR("Collecting block %d, in use %d, shrink %d, wholeBlock %d" TENDSTR),
++ block,
++ bi->pages_in_use,
++ bi->has_shrink_hdr,
++ wholeBlock));
+
+- return ok ? 1 : 0;
+-}
++ /*yaffs_verify_free_chunks(dev); */
+
+-static int yaffs_ReadCheckpointTnodes(yaffs_Object *obj)
+-{
+- __u32 baseChunk;
+- int ok = 1;
+- yaffs_Device *dev = obj->myDev;
+- yaffs_FileStructure *fileStructPtr = &obj->variant.fileVariant;
+- yaffs_Tnode *tn;
+- int nread = 0;
+- int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++ if(bi->block_state == YAFFS_BLOCK_STATE_FULL)
++ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
++
++	bi->has_shrink_hdr = 0;	/* clear the flag so that the block can be erased */
+
+- if (tnodeSize < sizeof(yaffs_Tnode))
+- tnodeSize = sizeof(yaffs_Tnode);
++ dev->gc_disable = 1;
+
+- ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++ if (isCheckpointBlock ||
++ !yaffs_still_some_chunks(dev, block)) {
++ T(YAFFS_TRACE_TRACING,
++ (TSTR
++ ("Collecting block %d that has no chunks in use" TENDSTR),
++ block));
++ yaffs_block_became_dirty(dev, block);
++ } else {
+
+- while (ok && (~baseChunk)) {
+- nread++;
+- /* Read level 0 tnode */
++ __u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
++ yaffs_verify_blk(dev, bi, block);
+
+- tn = yaffs_GetTnodeRaw(dev);
+- if (tn)
+- ok = (yaffs_CheckpointRead(dev, tn, tnodeSize) == tnodeSize);
+- else
+- ok = 0;
++ maxCopies = (wholeBlock) ? dev->param.chunks_per_block : 5;
++ oldChunk = block * dev->param.chunks_per_block + dev->gc_chunk;
+
+- if (tn && ok)
+- ok = yaffs_AddOrFindLevel0Tnode(dev,
+- fileStructPtr,
+- baseChunk,
+- tn) ? 1 : 0;
++ for (/* init already done */;
++ retVal == YAFFS_OK &&
++ dev->gc_chunk < dev->param.chunks_per_block &&
++ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
++ maxCopies > 0;
++ dev->gc_chunk++, oldChunk++) {
++ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+
+- if (ok)
+- ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++ /* This page is in use and might need to be copied off */
+
+- }
++ maxCopies--;
+
+- T(YAFFS_TRACE_CHECKPOINT, (
+- TSTR("Checkpoint read tnodes %d records, last %d. ok %d" TENDSTR),
+- nread, baseChunk, ok));
++ mark_flash = 1;
+
+- return ok ? 1 : 0;
+-}
++ yaffs_init_tags(&tags);
+
++ yaffs_rd_chunk_tags_nand(dev, oldChunk,
++ buffer, &tags);
+
+-static int yaffs_WriteCheckpointObjects(yaffs_Device *dev)
+-{
+- yaffs_Object *obj;
+- yaffs_CheckpointObject cp;
+- int i;
+- int ok = 1;
+- struct ylist_head *lh;
++ object =
++ yaffs_find_by_number(dev,
++ tags.obj_id);
+
++ T(YAFFS_TRACE_GC_DETAIL,
++ (TSTR
++ ("Collecting chunk in block %d, %d %d %d " TENDSTR),
++ dev->gc_chunk, tags.obj_id, tags.chunk_id,
++ tags.n_bytes));
+
+- /* Iterate through the objects in each hash entry,
+- * dumping them to the checkpointing stream.
+- */
++ if (object && !yaffs_skip_verification(dev)) {
++ if (tags.chunk_id == 0)
++ matchingChunk = object->hdr_chunk;
++ else if (object->soft_del)
++ matchingChunk = oldChunk; /* Defeat the test */
++ else
++ matchingChunk = yaffs_find_chunk_in_file(object, tags.chunk_id, NULL);
+
+- for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
+- ylist_for_each(lh, &dev->objectBucket[i].list) {
+- if (lh) {
+- obj = ylist_entry(lh, yaffs_Object, hashLink);
+- if (!obj->deferedFree) {
+- yaffs_ObjectToCheckpointObject(&cp, obj);
+- cp.structType = sizeof(cp);
+-
+- T(YAFFS_TRACE_CHECKPOINT, (
+- TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %x" TENDSTR),
+- cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk, (unsigned) obj));
++ if (oldChunk != matchingChunk)
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("gc: page in gc mismatch: %d %d %d %d"TENDSTR),
++ oldChunk, matchingChunk, tags.obj_id, tags.chunk_id));
+
+- ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
++ }
+
+- if (ok && obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+- ok = yaffs_WriteCheckpointTnodes(obj);
++ if (!object) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("page %d in gc has no object: %d %d %d "
++ TENDSTR), oldChunk,
++ tags.obj_id, tags.chunk_id, tags.n_bytes));
+ }
+- }
+- }
+- }
+
+- /* Dump end of list */
+- memset(&cp, 0xFF, sizeof(yaffs_CheckpointObject));
+- cp.structType = sizeof(cp);
++ if (object &&
++ object->deleted &&
++ object->soft_del &&
++ tags.chunk_id != 0) {
++				/* Data chunk in a soft-deleted file:
++				 * no need to copy it, just forget about it and
++				 * fix up the object.
++ */
++
++				/* Free chunks already includes soft-deleted chunks.
++				 * However, this chunk will soon be really deleted,
++				 * which will increment free chunks.
++				 * We have to decrement free chunks so this works out properly.
++ */
++ dev->n_free_chunks--;
++ bi->soft_del_pages--;
++
++ object->n_data_chunks--;
+
+- if (ok)
+- ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (object->n_data_chunks <= 0) {
++					/* remember to clean up the object */
++ dev->gc_cleanup_list[dev->n_clean_ups] =
++ tags.obj_id;
++ dev->n_clean_ups++;
++ }
++ mark_flash = 0;
++ } else if (0) {
++ /* Todo object && object->deleted && object->n_data_chunks == 0 */
++ /* Deleted object header with no data chunks.
++ * Can be discarded and the file deleted.
++ */
++ object->hdr_chunk = 0;
++ yaffs_free_tnode(object->my_dev,
++ object->variant.
++ file_variant.top);
++ object->variant.file_variant.top = NULL;
++ yaffs_generic_obj_del(object);
+
+- return ok ? 1 : 0;
+-}
++ } else if (object) {
++ /* It's either a data chunk in a live file or
++ * an ObjectHeader, so we're interested in it.
++ * NB Need to keep the ObjectHeaders of deleted files
++				 * until the whole file has been deleted.
++ */
++ tags.serial_number++;
+
+-static int yaffs_ReadCheckpointObjects(yaffs_Device *dev)
+-{
+- yaffs_Object *obj;
+- yaffs_CheckpointObject cp;
+- int ok = 1;
+- int done = 0;
+- yaffs_Object *hardList = NULL;
++ dev->n_gc_copies++;
+
+- while (ok && !done) {
+- ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
+- if (cp.structType != sizeof(cp)) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("struct size %d instead of %d ok %d"TENDSTR),
+- cp.structType, sizeof(cp), ok));
+- ok = 0;
+- }
+-
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR),
+- cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk));
+-
+- if (ok && cp.objectId == ~0)
+- done = 1;
+- else if (ok) {
+- obj = yaffs_FindOrCreateObjectByNumber(dev, cp.objectId, cp.variantType);
+- if (obj) {
+- ok = yaffs_CheckpointObjectToObject(obj, &cp);
+- if (!ok)
+- break;
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
+- ok = yaffs_ReadCheckpointTnodes(obj);
+- } else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+- obj->hardLinks.next =
+- (struct ylist_head *) hardList;
+- hardList = obj;
+- }
+- } else
+- ok = 0;
+- }
+- }
++ if (tags.chunk_id == 0) {
++					/* It is an object header. We need to
++					 * nuke the shrink-header flag first, and
++					 * also clean up shadowing.
++ * We no longer want the shrinkHeader flag since its work is done
++ * and if it is left in place it will mess up scanning.
++ */
+
+- if (ok)
+- yaffs_HardlinkFixup(dev, hardList);
++ yaffs_obj_header *oh;
++ oh = (yaffs_obj_header *)buffer;
+
+- return ok ? 1 : 0;
+-}
++ oh->is_shrink = 0;
++ tags.extra_is_shrink = 0;
+
+-static int yaffs_WriteCheckpointSum(yaffs_Device *dev)
+-{
+- __u32 checkpointSum;
+- int ok;
++ oh->shadows_obj = 0;
++ oh->inband_shadowed_obj_id = 0;
++ tags.extra_shadows = 0;
++
++ /* Update file size */
++ if(object->variant_type == YAFFS_OBJECT_TYPE_FILE){
++ oh->file_size = object->variant.file_variant.file_size;
++ tags.extra_length = oh->file_size;
++ }
++
++ yaffs_verify_oh(object, oh, &tags, 1);
++ newChunk =
++ yaffs_write_new_chunk(dev,(__u8 *) oh, &tags, 1);
++ } else
++ newChunk =
++ yaffs_write_new_chunk(dev, buffer, &tags, 1);
++
++ if (newChunk < 0) {
++ retVal = YAFFS_FAIL;
++ } else {
+
+- yaffs_GetCheckpointSum(dev, &checkpointSum);
++ /* Ok, now fix up the Tnodes etc. */
+
+- ok = (yaffs_CheckpointWrite(dev, &checkpointSum, sizeof(checkpointSum)) == sizeof(checkpointSum));
++ if (tags.chunk_id == 0) {
++ /* It's a header */
++ object->hdr_chunk = newChunk;
++ object->serial = tags.serial_number;
++ } else {
++ /* It's a data chunk */
++ int ok;
++ ok = yaffs_put_chunk_in_file
++ (object,
++ tags.chunk_id,
++ newChunk, 0);
++ }
++ }
++ }
+
+- if (!ok)
+- return 0;
++ if (retVal == YAFFS_OK)
++ yaffs_chunk_del(dev, oldChunk, mark_flash, __LINE__);
+
+- return 1;
+-}
++ }
++ }
+
+-static int yaffs_ReadCheckpointSum(yaffs_Device *dev)
+-{
+- __u32 checkpointSum0;
+- __u32 checkpointSum1;
+- int ok;
++ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+- yaffs_GetCheckpointSum(dev, &checkpointSum0);
+
+- ok = (yaffs_CheckpointRead(dev, &checkpointSum1, sizeof(checkpointSum1)) == sizeof(checkpointSum1));
+
+- if (!ok)
+- return 0;
++ }
+
+- if (checkpointSum0 != checkpointSum1)
+- return 0;
++ yaffs_verify_collected_blk(dev, bi, block);
+
+- return 1;
+-}
+
+
+-static int yaffs_WriteCheckpointData(yaffs_Device *dev)
+-{
+- int ok = 1;
++ if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
++ /*
++ * The gc did not complete. Set block state back to FULL
++ * because checkpointing does not restore gc.
++ */
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ } else {
++ /* The gc completed. */
++ /* Do any required cleanups */
++ for (i = 0; i < dev->n_clean_ups; i++) {
++ /* Time to delete the file too */
++ object =
++ yaffs_find_by_number(dev,
++ dev->gc_cleanup_list[i]);
++ if (object) {
++ yaffs_free_tnode(dev,
++ object->variant.file_variant.
++ top);
++ object->variant.file_variant.top = NULL;
++ T(YAFFS_TRACE_GC,
++ (TSTR
++ ("yaffs: About to finally delete object %d"
++ TENDSTR), object->obj_id));
++ yaffs_generic_obj_del(object);
++ object->my_dev->n_deleted_files--;
++ }
+
+- if (dev->skipCheckpointWrite || !dev->isYaffs2) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint write" TENDSTR)));
+- ok = 0;
+- }
++ }
+
+- if (ok)
+- ok = yaffs_CheckpointOpen(dev, 1);
+
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
+- ok = yaffs_WriteCheckpointValidityMarker(dev, 1);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint device" TENDSTR)));
+- ok = yaffs_WriteCheckpointDevice(dev);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint objects" TENDSTR)));
+- ok = yaffs_WriteCheckpointObjects(dev);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
+- ok = yaffs_WriteCheckpointValidityMarker(dev, 0);
++ chunksAfter = yaffs_get_erased_chunks(dev);
++ if (chunksBefore >= chunksAfter) {
++ T(YAFFS_TRACE_GC,
++ (TSTR
++ ("gc did not increase free chunks before %d after %d"
++ TENDSTR), chunksBefore, chunksAfter));
++ }
++ dev->gc_block = 0;
++ dev->gc_chunk = 0;
++ dev->n_clean_ups = 0;
+ }
+
+- if (ok)
+- ok = yaffs_WriteCheckpointSum(dev);
+-
+- if (!yaffs_CheckpointClose(dev))
+- ok = 0;
+-
+- if (ok)
+- dev->isCheckpointed = 1;
+- else
+- dev->isCheckpointed = 0;
++ dev->gc_disable = 0;
+
+- return dev->isCheckpointed;
++ return retVal;
+ }
+
+-static int yaffs_ReadCheckpointData(yaffs_Device *dev)
++/*
++ * yaffs_find_gc_block() is used to select the dirtiest block (or close enough)
++ * for garbage collection.
++ */
++
++static unsigned yaffs_find_gc_block(yaffs_dev_t *dev,
++ int aggressive,
++ int background)
+ {
+- int ok = 1;
++ int i;
++ int iterations;
++ unsigned selected = 0;
++ int prioritised = 0;
++ int prioritisedExists = 0;
++ yaffs_block_info_t *bi;
++ int threshold;
+
+- if (dev->skipCheckpointRead || !dev->isYaffs2) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint read" TENDSTR)));
+- ok = 0;
+- }
++ /* First let's see if we need to grab a prioritised block */
++ if (dev->has_pending_prioritised_gc && !aggressive) {
++ dev->gc_dirtiest = 0;
++ bi = dev->block_info;
++ for (i = dev->internal_start_block;
++ i <= dev->internal_end_block && !selected;
++ i++) {
++
++ if (bi->gc_prioritise) {
++ prioritisedExists = 1;
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
++ yaffs_block_ok_for_gc(dev, bi)) {
++ selected = i;
++ prioritised = 1;
++ }
++ }
++ bi++;
++ }
+
+- if (ok)
+- ok = yaffs_CheckpointOpen(dev, 0); /* open for read */
++ /*
++ * If there is a prioritised block and none was selected then
++ * this happened because there is at least one old dirty block gumming
++ * up the works. Let's gc the oldest dirty block.
++ */
+
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
+- ok = yaffs_ReadCheckpointValidityMarker(dev, 1);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint device" TENDSTR)));
+- ok = yaffs_ReadCheckpointDevice(dev);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint objects" TENDSTR)));
+- ok = yaffs_ReadCheckpointObjects(dev);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
+- ok = yaffs_ReadCheckpointValidityMarker(dev, 0);
+- }
++ if(prioritisedExists &&
++ !selected &&
++ dev->oldest_dirty_block > 0)
++ selected = dev->oldest_dirty_block;
+
+- if (ok) {
+- ok = yaffs_ReadCheckpointSum(dev);
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint checksum %d" TENDSTR), ok));
++ if (!prioritisedExists) /* None found, so we can clear this */
++ dev->has_pending_prioritised_gc = 0;
+ }
+
+- if (!yaffs_CheckpointClose(dev))
+- ok = 0;
+-
+- if (ok)
+- dev->isCheckpointed = 1;
+- else
+- dev->isCheckpointed = 0;
+-
+- return ok ? 1 : 0;
++	/* If we're doing aggressive GC then we are happy to take a less-dirty block, and
++	 * search harder.
++	 * Otherwise (we're doing a leisurely gc) we only bother to do this if the
++	 * block has only a few pages in use.
++ */
+
+-}
++ if (!selected){
++ int pagesUsed;
++ int nBlocks = dev->internal_end_block - dev->internal_start_block + 1;
++ if (aggressive){
++ threshold = dev->param.chunks_per_block;
++ iterations = nBlocks;
++ } else {
++ int maxThreshold;
+
+-static void yaffs_InvalidateCheckpoint(yaffs_Device *dev)
+-{
+- if (dev->isCheckpointed ||
+- dev->blocksInCheckpoint > 0) {
+- dev->isCheckpointed = 0;
+- yaffs_CheckpointInvalidateStream(dev);
+- if (dev->superBlock && dev->markSuperBlockDirty)
+- dev->markSuperBlockDirty(dev->superBlock);
+- }
+-}
++ if(background)
++ maxThreshold = dev->param.chunks_per_block/2;
++ else
++ maxThreshold = dev->param.chunks_per_block/8;
+
++ if(maxThreshold < YAFFS_GC_PASSIVE_THRESHOLD)
++ maxThreshold = YAFFS_GC_PASSIVE_THRESHOLD;
+
+-int yaffs_CheckpointSave(yaffs_Device *dev)
+-{
++ threshold = background ?
++ (dev->gc_not_done + 2) * 2 : 0;
++ if(threshold <YAFFS_GC_PASSIVE_THRESHOLD)
++ threshold = YAFFS_GC_PASSIVE_THRESHOLD;
++ if(threshold > maxThreshold)
++ threshold = maxThreshold;
+
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("save entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++ iterations = nBlocks / 16 + 1;
++ if (iterations > 100)
++ iterations = 100;
++ }
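++
++		/* Example with hypothetical values, assuming
++		 * YAFFS_GC_PASSIVE_THRESHOLD does not exceed these figures:
++		 * on a background pass with 64 chunks per block and
++		 * gc_not_done = 4, maxThreshold = 64/2 = 32 and
++		 * threshold = (4 + 2) * 2 = 12, so this leisurely pass only
++		 * accepts blocks with at most 12 live pages.
++		 */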
+
+- yaffs_VerifyObjects(dev);
+- yaffs_VerifyBlocks(dev);
+- yaffs_VerifyFreeChunks(dev);
++ for (i = 0;
++ i < iterations &&
++ (dev->gc_dirtiest < 1 ||
++ dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
++ i++) {
++ dev->gc_block_finder++;
++ if (dev->gc_block_finder < dev->internal_start_block ||
++ dev->gc_block_finder > dev->internal_end_block)
++ dev->gc_block_finder = dev->internal_start_block;
+
+- if (!dev->isCheckpointed) {
+- yaffs_InvalidateCheckpoint(dev);
+- yaffs_WriteCheckpointData(dev);
+- }
++ bi = yaffs_get_block_info(dev, dev->gc_block_finder);
+
+- T(YAFFS_TRACE_ALWAYS, (TSTR("save exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++ pagesUsed = bi->pages_in_use - bi->soft_del_pages;
+
+- return dev->isCheckpointed;
+-}
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
++ pagesUsed < dev->param.chunks_per_block &&
++ (dev->gc_dirtiest < 1 || pagesUsed < dev->gc_pages_in_use) &&
++ yaffs_block_ok_for_gc(dev, bi)) {
++ dev->gc_dirtiest = dev->gc_block_finder;
++ dev->gc_pages_in_use = pagesUsed;
++ }
++ }
+
+-int yaffs_CheckpointRestore(yaffs_Device *dev)
+-{
+- int retval;
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++ if(dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
++ selected = dev->gc_dirtiest;
++ }
+
+- retval = yaffs_ReadCheckpointData(dev);
++ /*
++	 * If nothing has been selected for a while, try selecting the oldest dirty
++	 * block, since that is what is gumming up the works.
++ */
+
+- if (dev->isCheckpointed) {
+- yaffs_VerifyObjects(dev);
+- yaffs_VerifyBlocks(dev);
+- yaffs_VerifyFreeChunks(dev);
++ if(!selected && dev->param.is_yaffs2 &&
++ dev->gc_not_done >= ( background ? 10 : 20)){
++ yaffs2_find_oldest_dirty_seq(dev);
++ if(dev->oldest_dirty_block > 0) {
++ selected = dev->oldest_dirty_block;
++ dev->gc_dirtiest = selected;
++ dev->oldest_dirty_gc_count++;
++ bi = yaffs_get_block_info(dev, selected);
++ dev->gc_pages_in_use = bi->pages_in_use - bi->soft_del_pages;
++ } else
++ dev->gc_not_done = 0;
+ }
+
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++ if(selected){
++ T(YAFFS_TRACE_GC,
++ (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR),
++ selected,
++ dev->param.chunks_per_block - dev->gc_pages_in_use,
++ prioritised));
++
++ dev->n_gc_blocks++;
++ if(background)
++ dev->bg_gcs++;
++
++ dev->gc_dirtiest = 0;
++ dev->gc_pages_in_use = 0;
++ dev->gc_not_done = 0;
++ if(dev->refresh_skip > 0)
++ dev->refresh_skip--;
++ } else{
++ dev->gc_not_done++;
++ T(YAFFS_TRACE_GC,
++ (TSTR("GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s" TENDSTR),
++ dev->gc_block_finder, dev->gc_not_done,
++ threshold,
++ dev->gc_dirtiest, dev->gc_pages_in_use,
++ dev->oldest_dirty_block,
++ background ? " bg" : ""));
++ }
+
+- return retval;
++ return selected;
+ }
+
+-/*--------------------- File read/write ------------------------
+- * Read and write have very similar structures.
+- * In general the read/write has three parts to it
+- * An incomplete chunk to start with (if the read/write is not chunk-aligned)
+- * Some complete chunks
+- * An incomplete chunk to end off with
++/* New garbage collector
++ * If we're very low on erased blocks then we do aggressive garbage collection,
++ * otherwise we do "leisurely" garbage collection.
++ * Aggressive gc looks further (the whole array) and will accept less-dirty blocks.
++ * Passive gc only inspects smaller areas and will only accept dirtier blocks.
+ *
+- * Curve-balls: the first chunk might also be the last chunk.
++ * The idea is to help clear out space in a more spread-out manner.
++ * Whether it really provides much benefit in practice is unclear.
+ */
+-
+-int yaffs_ReadDataFromFile(yaffs_Object *in, __u8 *buffer, loff_t offset,
+- int nBytes)
++static int yaffs_check_gc(yaffs_dev_t *dev, int background)
+ {
++ int aggressive = 0;
++ int gcOk = YAFFS_OK;
++ int maxTries = 0;
++ int minErased;
++ int erasedChunks;
++ int checkpointBlockAdjust;
+
+- int chunk;
+- __u32 start;
+- int nToCopy;
+- int n = nBytes;
+- int nDone = 0;
+- yaffs_ChunkCache *cache;
+-
+- yaffs_Device *dev;
+-
+- dev = in->myDev;
+-
+- while (n > 0) {
+- /* chunk = offset / dev->nDataBytesPerChunk + 1; */
+- /* start = offset % dev->nDataBytesPerChunk; */
+- yaffs_AddrToChunk(dev, offset, &chunk, &start);
+- chunk++;
+-
+- /* OK now check for the curveball where the start and end are in
+- * the same chunk.
+- */
+- if ((start + n) < dev->nDataBytesPerChunk)
+- nToCopy = n;
+- else
+- nToCopy = dev->nDataBytesPerChunk - start;
+-
+- cache = yaffs_FindChunkCache(in, chunk);
+-
+- /* If the chunk is already in the cache or it is less than a whole chunk
+- * or we're using inband tags then use the cache (if there is caching)
+- * else bypass the cache.
+- */
+- if (cache || nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
+- if (dev->nShortOpCaches > 0) {
+-
+- /* If we can't find the data in the cache, then load it up. */
++ if(dev->param.gc_control &&
++ (dev->param.gc_control(dev) & 1) == 0)
++ return YAFFS_OK;
+
+- if (!cache) {
+- cache = yaffs_GrabChunkCache(in->myDev);
+- cache->object = in;
+- cache->chunkId = chunk;
+- cache->dirty = 0;
+- cache->locked = 0;
+- yaffs_ReadChunkDataFromObject(in, chunk,
+- cache->
+- data);
+- cache->nBytes = 0;
+- }
++ if (dev->gc_disable) {
++ /* Bail out so we don't get recursive gc */
++ return YAFFS_OK;
++ }
+
+- yaffs_UseChunkCache(dev, cache, 0);
++ /* This loop should pass the first time.
++ * We'll only see looping here if the collection does not increase space.
++ */
+
+- cache->locked = 1;
++ do {
++ maxTries++;
+
++ checkpointBlockAdjust = yaffs_calc_checkpt_blocks_required(dev);
+
+- memcpy(buffer, &cache->data[start], nToCopy);
++ minErased = dev->param.n_reserved_blocks + checkpointBlockAdjust + 1;
++ erasedChunks = dev->n_erased_blocks * dev->param.chunks_per_block;
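++
++		/* For example (hypothetical values): with n_reserved_blocks = 5
++		 * and a checkpoint needing 3 blocks, minErased = 5 + 3 + 1 = 9,
++		 * so gc turns aggressive once fewer than 9 erased blocks remain.
++		 */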
+
+- cache->locked = 0;
+- } else {
+- /* Read into the local buffer then copy..*/
++ /* If we need a block soon then do aggressive gc.*/
++ if (dev->n_erased_blocks < minErased)
++ aggressive = 1;
++ else {
++ if(!background && erasedChunks > (dev->n_free_chunks / 4))
++ break;
+
+- __u8 *localBuffer =
+- yaffs_GetTempBuffer(dev, __LINE__);
+- yaffs_ReadChunkDataFromObject(in, chunk,
+- localBuffer);
++ if(dev->gc_skip > 20)
++ dev->gc_skip = 20;
++ if(erasedChunks < dev->n_free_chunks/2 ||
++ dev->gc_skip < 1 ||
++ background)
++ aggressive = 0;
++ else {
++ dev->gc_skip--;
++ break;
++ }
++ }
+
+- memcpy(buffer, &localBuffer[start], nToCopy);
++ dev->gc_skip = 5;
+
++ /* If we don't already have a block being gc'd then see if we should start another */
+
+- yaffs_ReleaseTempBuffer(dev, localBuffer,
+- __LINE__);
+- }
++ if (dev->gc_block < 1 && !aggressive) {
++ dev->gc_block = yaffs2_find_refresh_block(dev);
++ dev->gc_chunk = 0;
++ dev->n_clean_ups=0;
++ }
++ if (dev->gc_block < 1) {
++ dev->gc_block = yaffs_find_gc_block(dev, aggressive, background);
++ dev->gc_chunk = 0;
++ dev->n_clean_ups=0;
++ }
+
+- } else {
++ if (dev->gc_block > 0) {
++ dev->all_gcs++;
++ if (!aggressive)
++ dev->passive_gc_count++;
+
+- /* A full chunk. Read directly into the supplied buffer. */
+- yaffs_ReadChunkDataFromObject(in, chunk, buffer);
++ T(YAFFS_TRACE_GC,
++ (TSTR
++ ("yaffs: GC erasedBlocks %d aggressive %d" TENDSTR),
++ dev->n_erased_blocks, aggressive));
+
++ gcOk = yaffs_gc_block(dev, dev->gc_block, aggressive);
+ }
+
+- n -= nToCopy;
+- offset += nToCopy;
+- buffer += nToCopy;
+- nDone += nToCopy;
+-
+- }
++ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) && dev->gc_block > 0) {
++ T(YAFFS_TRACE_GC,
++ (TSTR
++ ("yaffs: GC !!!no reclaim!!! erasedBlocks %d after try %d block %d"
++ TENDSTR), dev->n_erased_blocks, maxTries, dev->gc_block));
++ }
++ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
++ (dev->gc_block > 0) &&
++ (maxTries < 2));
+
+- return nDone;
++ return aggressive ? gcOk : YAFFS_OK;
+ }
+
+-int yaffs_WriteDataToFile(yaffs_Object *in, const __u8 *buffer, loff_t offset,
+- int nBytes, int writeThrough)
++/*
++ * yaffs_bg_gc()
++ * Garbage collects. Intended to be called from a background thread.
++ * Returns non-zero if at least half the free chunks are erased.
++ */
++int yaffs_bg_gc(yaffs_dev_t *dev, unsigned urgency)
+ {
++ int erasedChunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+- int chunk;
+- __u32 start;
+- int nToCopy;
+- int n = nBytes;
+- int nDone = 0;
+- int nToWriteBack;
+- int startOfWrite = offset;
+- int chunkWritten = 0;
+- __u32 nBytesRead;
+- __u32 chunkStart;
++ T(YAFFS_TRACE_BACKGROUND, (TSTR("Background gc %u" TENDSTR),urgency));
+
+- yaffs_Device *dev;
++ yaffs_check_gc(dev, 1);
++ return erasedChunks > dev->n_free_chunks/2;
++}
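++
++/*
++ * Minimal caller sketch (illustration only, not part of yaffs): a background
++ * thread could poll yaffs_bg_gc() and back off once it reports that at least
++ * half of the free chunks are erased. bg_should_run() and bg_sleep_secs()
++ * are hypothetical helpers standing in for whatever the OS glue provides.
++ */
++#if 0
++static void example_bg_gc_loop(yaffs_dev_t *dev)
++{
++	unsigned urgency = 0;
++
++	while (bg_should_run()) {		/* hypothetical helper */
++		if (yaffs_bg_gc(dev, urgency))
++			urgency = 0;		/* plenty of erased space; relax */
++		else
++			urgency++;		/* push harder on the next pass */
++		bg_sleep_secs(urgency ? 1 : 10);	/* hypothetical helper */
++	}
++}
++#endif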
+
+- dev = in->myDev;
++/*------------------------- TAGS --------------------------------*/
+
+- while (n > 0 && chunkWritten >= 0) {
+- /* chunk = offset / dev->nDataBytesPerChunk + 1; */
+- /* start = offset % dev->nDataBytesPerChunk; */
+- yaffs_AddrToChunk(dev, offset, &chunk, &start);
++static int yaffs_tags_match(const yaffs_ext_tags *tags, int obj_id,
++ int chunkInObject)
++{
++ return (tags->chunk_id == chunkInObject &&
++ tags->obj_id == obj_id && !tags->is_deleted) ? 1 : 0;
+
+- if (chunk * dev->nDataBytesPerChunk + start != offset ||
+- start >= dev->nDataBytesPerChunk) {
+- T(YAFFS_TRACE_ERROR, (
+- TSTR("AddrToChunk of offset %d gives chunk %d start %d"
+- TENDSTR),
+- (int)offset, chunk, start));
+- }
+- chunk++;
++}
+
+- /* OK now check for the curveball where the start and end are in
+- * the same chunk.
+- */
+
+- if ((start + n) < dev->nDataBytesPerChunk) {
+- nToCopy = n;
++/*-------------------- Data file manipulation -----------------*/
+
+- /* Now folks, to calculate how many bytes to write back....
+- * If we're overwriting and not writing to then end of file then
+- * we need to write back as much as was there before.
+- */
++static int yaffs_find_chunk_in_file(yaffs_obj_t *in, int inode_chunk,
++ yaffs_ext_tags *tags)
++{
++	/* Get the Tnode, then get the level 0 chunk offset */
++ yaffs_tnode_t *tn;
++ int theChunk = -1;
++ yaffs_ext_tags localTags;
++ int retVal = -1;
+
+- chunkStart = ((chunk - 1) * dev->nDataBytesPerChunk);
++ yaffs_dev_t *dev = in->my_dev;
+
+- if (chunkStart > in->variant.fileVariant.fileSize)
+- nBytesRead = 0; /* Past end of file */
+- else
+- nBytesRead = in->variant.fileVariant.fileSize - chunkStart;
++ if (!tags) {
++ /* Passed a NULL, so use our own tags space */
++ tags = &localTags;
++ }
+
+- if (nBytesRead > dev->nDataBytesPerChunk)
+- nBytesRead = dev->nDataBytesPerChunk;
++ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+- nToWriteBack =
+- (nBytesRead >
+- (start + n)) ? nBytesRead : (start + n);
++ if (tn) {
++ theChunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+- if (nToWriteBack < 0 || nToWriteBack > dev->nDataBytesPerChunk)
+- YBUG();
++ retVal =
++ yaffs_find_chunk_in_group(dev, theChunk, tags, in->obj_id,
++ inode_chunk);
++ }
++ return retVal;
++}
+
+- } else {
+- nToCopy = dev->nDataBytesPerChunk - start;
+- nToWriteBack = dev->nDataBytesPerChunk;
+- }
++static int yaffs_find_del_file_chunk(yaffs_obj_t *in, int inode_chunk,
++ yaffs_ext_tags *tags)
++{
++	/* Get the Tnode, then get the level 0 chunk offset */
++ yaffs_tnode_t *tn;
++ int theChunk = -1;
++ yaffs_ext_tags localTags;
+
+- if (nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
+- /* An incomplete start or end chunk (or maybe both start and end chunk),
+- * or we're using inband tags, so we want to use the cache buffers.
+- */
+- if (dev->nShortOpCaches > 0) {
+- yaffs_ChunkCache *cache;
+- /* If we can't find the data in the cache, then load the cache */
+- cache = yaffs_FindChunkCache(in, chunk);
++ yaffs_dev_t *dev = in->my_dev;
++ int retVal = -1;
+
+- if (!cache
+- && yaffs_CheckSpaceForAllocation(in->
+- myDev)) {
+- cache = yaffs_GrabChunkCache(in->myDev);
+- cache->object = in;
+- cache->chunkId = chunk;
+- cache->dirty = 0;
+- cache->locked = 0;
+- yaffs_ReadChunkDataFromObject(in, chunk,
+- cache->
+- data);
+- } else if (cache &&
+- !cache->dirty &&
+- !yaffs_CheckSpaceForAllocation(in->myDev)) {
+- /* Drop the cache if it was a read cache item and
+- * no space check has been made for it.
+- */
+- cache = NULL;
+- }
++ if (!tags) {
++ /* Passed a NULL, so use our own tags space */
++ tags = &localTags;
++ }
+
+- if (cache) {
+- yaffs_UseChunkCache(dev, cache, 1);
+- cache->locked = 1;
++ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
++ if (tn) {
+
+- memcpy(&cache->data[start], buffer,
+- nToCopy);
++ theChunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
++ retVal =
++ yaffs_find_chunk_in_group(dev, theChunk, tags, in->obj_id,
++ inode_chunk);
+
+- cache->locked = 0;
+- cache->nBytes = nToWriteBack;
++ /* Delete the entry in the filestructure (if found) */
++ if (retVal != -1)
++ yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
++ }
+
+- if (writeThrough) {
+- chunkWritten =
+- yaffs_WriteChunkDataToObject
+- (cache->object,
+- cache->chunkId,
+- cache->data, cache->nBytes,
+- 1);
+- cache->dirty = 0;
+- }
++ return retVal;
++}
+
+- } else {
+- chunkWritten = -1; /* fail the write */
+- }
+- } else {
+- /* An incomplete start or end chunk (or maybe both start and end chunk)
+- * Read into the local buffer then copy, then copy over and write back.
+- */
+
+- __u8 *localBuffer =
+- yaffs_GetTempBuffer(dev, __LINE__);
++int yaffs_put_chunk_in_file(yaffs_obj_t *in, int inode_chunk,
++ int nand_chunk, int in_scan)
++{
++ /* NB in_scan is zero unless scanning.
++ * For forward scanning, in_scan is > 0;
++ * for backward scanning in_scan is < 0
++ *
++ * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
++ */
+
+- yaffs_ReadChunkDataFromObject(in, chunk,
+- localBuffer);
++ yaffs_tnode_t *tn;
++ yaffs_dev_t *dev = in->my_dev;
++ int existingChunk;
++ yaffs_ext_tags existingTags;
++ yaffs_ext_tags newTags;
++ unsigned existingSerial, newSerial;
+
++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
++		/* Just ignore an attempt at putting a chunk into a non-file during scanning.
++		 * If it is not during scanning then something went wrong!
++ */
++ if (!in_scan) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++			    ("yaffs tragedy: attempt to put data chunk into a non-file"
++ TENDSTR)));
++ YBUG();
++ }
+
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++ return YAFFS_OK;
++ }
+
+- memcpy(&localBuffer[start], buffer, nToCopy);
++ tn = yaffs_add_find_tnode_0(dev,
++ &in->variant.file_variant,
++ inode_chunk,
++ NULL);
++ if (!tn)
++ return YAFFS_FAIL;
++
++ if(!nand_chunk)
++ /* Dummy insert, bail now */
++ return YAFFS_OK;
+
+- chunkWritten =
+- yaffs_WriteChunkDataToObject(in, chunk,
+- localBuffer,
+- nToWriteBack,
+- 0);
++ existingChunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+- yaffs_ReleaseTempBuffer(dev, localBuffer,
+- __LINE__);
++ if (in_scan != 0) {
++ /* If we're scanning then we need to test for duplicates
++ * NB This does not need to be efficient since it should only ever
++ * happen when the power fails during a write, then only one
++ * chunk should ever be affected.
++ *
++ * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO
++ * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
++ */
++
++ if (existingChunk > 0) {
++			/* NB Right now the existing chunk will not be the real chunk_id if the
++			 * chunk group size > 1, so we have to do a yaffs_find_chunk_in_file()
++			 * to get the real chunk id.
++			 *
++			 * We have a duplicate, and we need to decide which one to use:
++ *
++ * Backwards scanning YAFFS2: The old one is what we use, dump the new one.
++ * Forward scanning YAFFS2: The new one is what we use, dump the old one.
++ * YAFFS1: Get both sets of tags and compare serial numbers.
++ */
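++			/*
++			 * Illustration of the serial comparison used below
++			 * (hypothetical values): serial numbers wrap modulo 4,
++			 * so if existingSerial == 3 and newSerial == 0 then
++			 * ((3 + 1) & 3) == 0 and the new chunk is treated as
++			 * the more recent one.
++			 */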
++
++ if (in_scan > 0) {
++ /* Only do this for forward scanning */
++ yaffs_rd_chunk_tags_nand(dev,
++ nand_chunk,
++ NULL, &newTags);
+
++ /* Do a proper find */
++ existingChunk =
++ yaffs_find_chunk_in_file(in, inode_chunk,
++ &existingTags);
+ }
+
+- } else {
+- /* A full chunk. Write directly from the supplied buffer. */
++ if (existingChunk <= 0) {
++ /*Hoosterman - how did this happen? */
+
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("yaffs tragedy: existing chunk < 0 in scan"
++ TENDSTR)));
+
++ }
+
+- chunkWritten =
+- yaffs_WriteChunkDataToObject(in, chunk, buffer,
+- dev->nDataBytesPerChunk,
+- 0);
++ /* NB The deleted flags should be false, otherwise the chunks will
++ * not be loaded during a scan
++ */
+
+- /* Since we've overwritten the cached data, we better invalidate it. */
+- yaffs_InvalidateChunkCache(in, chunk);
+- }
++ if (in_scan > 0) {
++ newSerial = newTags.serial_number;
++ existingSerial = existingTags.serial_number;
++ }
+
+- if (chunkWritten >= 0) {
+- n -= nToCopy;
+- offset += nToCopy;
+- buffer += nToCopy;
+- nDone += nToCopy;
++ if ((in_scan > 0) &&
++ (existingChunk <= 0 ||
++ ((existingSerial + 1) & 3) == newSerial)) {
++ /* Forward scanning.
++ * Use new
++ * Delete the old one and drop through to update the tnode
++ */
++ yaffs_chunk_del(dev, existingChunk, 1,
++ __LINE__);
++ } else {
++ /* Backward scanning or we want to use the existing one
++ * Use existing.
++ * Delete the new one and return early so that the tnode isn't changed
++ */
++ yaffs_chunk_del(dev, nand_chunk, 1,
++ __LINE__);
++ return YAFFS_OK;
++ }
+ }
+
+ }
+
+- /* Update file object */
+-
+- if ((startOfWrite + nDone) > in->variant.fileVariant.fileSize)
+- in->variant.fileVariant.fileSize = (startOfWrite + nDone);
++ if (existingChunk == 0)
++ in->n_data_chunks++;
+
+- in->dirty = 1;
++ yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
+
+- return nDone;
++ return YAFFS_OK;
+ }
+
++static int yaffs_rd_data_obj(yaffs_obj_t *in, int inode_chunk,
++ __u8 *buffer)
++{
++ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+-/* ---------------------- File resizing stuff ------------------ */
++ if (nand_chunk >= 0)
++ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
++ buffer, NULL);
++ else {
++ T(YAFFS_TRACE_NANDACCESS,
++ (TSTR("Chunk %d not found zero instead" TENDSTR),
++ nand_chunk));
++ /* get sane (zero) data if you read a hole */
++ memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
++ return 0;
++ }
++
++}
+
+-static void yaffs_PruneResizedChunks(yaffs_Object *in, int newSize)
++void yaffs_chunk_del(yaffs_dev_t *dev, int chunk_id, int mark_flash, int lyn)
+ {
++ int block;
++ int page;
++ yaffs_ext_tags tags;
++ yaffs_block_info_t *bi;
+
+- yaffs_Device *dev = in->myDev;
+- int oldFileSize = in->variant.fileVariant.fileSize;
++ if (chunk_id <= 0)
++ return;
+
+- int lastDel = 1 + (oldFileSize - 1) / dev->nDataBytesPerChunk;
++ dev->n_deletions++;
++ block = chunk_id / dev->param.chunks_per_block;
++ page = chunk_id % dev->param.chunks_per_block;
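++	/* e.g. (hypothetical geometry) with 64 chunks per block,
++	 * chunk_id 200 maps to block 3, page 8. */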
+
+- int startDel = 1 + (newSize + dev->nDataBytesPerChunk - 1) /
+- dev->nDataBytesPerChunk;
+- int i;
+- int chunkId;
+
+- /* Delete backwards so that we don't end up with holes if
+- * power is lost part-way through the operation.
++ if (!yaffs_check_chunk_bit(dev, block, page))
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Deleting invalid chunk %d"TENDSTR),
++ chunk_id));
++
++ bi = yaffs_get_block_info(dev, block);
++
++ yaffs2_update_oldest_dirty_seq(dev, block, bi);
++
++ T(YAFFS_TRACE_DELETION,
++ (TSTR("line %d delete of chunk %d" TENDSTR), lyn, chunk_id));
++
++ if (!dev->param.is_yaffs2 && mark_flash &&
++ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
++
++ yaffs_init_tags(&tags);
++
++ tags.is_deleted = 1;
++
++ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
++ yaffs_handle_chunk_update(dev, chunk_id, &tags);
++ } else {
++ dev->n_unmarked_deletions++;
++ }
++
++ /* Pull out of the management area.
++ * If the whole block became dirty, this will kick off an erasure.
+ */
+- for (i = lastDel; i >= startDel; i--) {
+- /* NB this could be optimised somewhat,
+- * eg. could retrieve the tags and write them without
+- * using yaffs_DeleteChunk
+- */
++ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
++ bi->block_state == YAFFS_BLOCK_STATE_FULL ||
++ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
++ dev->n_free_chunks++;
+
+- chunkId = yaffs_FindAndDeleteChunkInFile(in, i, NULL);
+- if (chunkId > 0) {
+- if (chunkId <
+- (dev->internalStartBlock * dev->nChunksPerBlock)
+- || chunkId >=
+- ((dev->internalEndBlock +
+- 1) * dev->nChunksPerBlock)) {
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("Found daft chunkId %d for %d" TENDSTR),
+- chunkId, i));
+- } else {
+- in->nDataChunks--;
+- yaffs_DeleteChunk(dev, chunkId, 1, __LINE__);
+- }
++ yaffs_clear_chunk_bit(dev, block, page);
++
++ bi->pages_in_use--;
++
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
++ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++ yaffs_block_became_dirty(dev, block);
+ }
++
+ }
+
+ }
+
+-int yaffs_ResizeFile(yaffs_Object *in, loff_t newSize)
++static int yaffs_wr_data_obj(yaffs_obj_t *in, int inode_chunk,
++ const __u8 *buffer, int n_bytes,
++ int useReserve)
+ {
++	/* Find the old chunk; we need to do this to get the serial number.
++	 * Write the new one and patch it into the tree.
++	 * Invalidate the old tags.
++ */
+
+- int oldFileSize = in->variant.fileVariant.fileSize;
+- __u32 newSizeOfPartialChunk;
+- int newFullChunks;
++ int prevChunkId;
++ yaffs_ext_tags prevTags;
+
+- yaffs_Device *dev = in->myDev;
++ int newChunkId;
++ yaffs_ext_tags newTags;
+
+- yaffs_AddrToChunk(dev, newSize, &newFullChunks, &newSizeOfPartialChunk);
++ yaffs_dev_t *dev = in->my_dev;
+
+- yaffs_FlushFilesChunkCache(in);
+- yaffs_InvalidateWholeChunkCache(in);
++ yaffs_check_gc(dev,0);
+
+- yaffs_CheckGarbageCollection(dev);
++ /* Get the previous chunk at this location in the file if it exists.
++ * If it does not exist then put a zero into the tree. This creates
++ * the tnode now, rather than later when it is harder to clean up.
++ */
++ prevChunkId = yaffs_find_chunk_in_file(in, inode_chunk, &prevTags);
++ if(prevChunkId < 1 &&
++ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
++ return 0;
+
+- if (in->variantType != YAFFS_OBJECT_TYPE_FILE)
+- return YAFFS_FAIL;
++ /* Set up new tags */
++ yaffs_init_tags(&newTags);
+
+- if (newSize == oldFileSize)
+- return YAFFS_OK;
++ newTags.chunk_id = inode_chunk;
++ newTags.obj_id = in->obj_id;
++ newTags.serial_number =
++ (prevChunkId > 0) ? prevTags.serial_number + 1 : 1;
++ newTags.n_bytes = n_bytes;
+
+- if (newSize < oldFileSize) {
++ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("Writing %d bytes to chunk!!!!!!!!!" TENDSTR), n_bytes));
++ YBUG();
++ }
++
++
++ newChunkId =
++ yaffs_write_new_chunk(dev, buffer, &newTags,
++ useReserve);
+
+- yaffs_PruneResizedChunks(in, newSize);
++ if (newChunkId > 0) {
++ yaffs_put_chunk_in_file(in, inode_chunk, newChunkId, 0);
+
+- if (newSizeOfPartialChunk != 0) {
+- int lastChunk = 1 + newFullChunks;
++ if (prevChunkId > 0)
++ yaffs_chunk_del(dev, prevChunkId, 1, __LINE__);
+
+- __u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
++ yaffs_verify_file_sane(in);
++ }
++ return newChunkId;
+
+- /* Got to read and rewrite the last chunk with its new size and zero pad */
+- yaffs_ReadChunkDataFromObject(in, lastChunk,
+- localBuffer);
++}
+
+- memset(localBuffer + newSizeOfPartialChunk, 0,
+- dev->nDataBytesPerChunk - newSizeOfPartialChunk);
++/* yaffs_update_oh() updates the header on NAND for an object.
++ * If name is not NULL, then that new name is used.
++ */
++int yaffs_update_oh(yaffs_obj_t *in, const YCHAR *name, int force,
++ int is_shrink, int shadows, yaffs_xattr_mod *xmod)
++{
+
+- yaffs_WriteChunkDataToObject(in, lastChunk, localBuffer,
+- newSizeOfPartialChunk, 1);
++ yaffs_block_info_t *bi;
+
+- yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
+- }
++ yaffs_dev_t *dev = in->my_dev;
+
+- in->variant.fileVariant.fileSize = newSize;
++ int prevChunkId;
++ int retVal = 0;
++ int result = 0;
+
+- yaffs_PruneFileStructure(dev, &in->variant.fileVariant);
+- } else {
+- /* newsSize > oldFileSize */
+- in->variant.fileVariant.fileSize = newSize;
+- }
++ int newChunkId;
++ yaffs_ext_tags newTags;
++ yaffs_ext_tags oldTags;
++ const YCHAR *alias = NULL;
+
++ __u8 *buffer = NULL;
++ YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
+
+- /* Write a new object header.
+- * show we've shrunk the file, if need be
+- * Do this only if the file is not in the deleted directories.
+- */
+- if (in->parent &&
+- in->parent->objectId != YAFFS_OBJECTID_UNLINKED &&
+- in->parent->objectId != YAFFS_OBJECTID_DELETED)
+- yaffs_UpdateObjectHeader(in, NULL, 0,
+- (newSize < oldFileSize) ? 1 : 0, 0);
++ yaffs_obj_header *oh = NULL;
+
+- return YAFFS_OK;
+-}
++ yaffs_strcpy(old_name, _Y("silly old name"));
+
+-loff_t yaffs_GetFileSize(yaffs_Object *obj)
+-{
+- obj = yaffs_GetEquivalentObject(obj);
+
+- switch (obj->variantType) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- return obj->variant.fileVariant.fileSize;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- return yaffs_strlen(obj->variant.symLinkVariant.alias);
+- default:
+- return 0;
+- }
+-}
++ if (!in->fake ||
++ in == dev->root_dir || /* The root_dir should also be saved */
++ force || xmod) {
+
++ yaffs_check_gc(dev,0);
++ yaffs_check_obj_details_loaded(in);
+
++ buffer = yaffs_get_temp_buffer(in->my_dev, __LINE__);
++ oh = (yaffs_obj_header *) buffer;
++
++ prevChunkId = in->hdr_chunk;
++
++ if (prevChunkId > 0) {
++ result = yaffs_rd_chunk_tags_nand(dev, prevChunkId,
++ buffer, &oldTags);
++
++ yaffs_verify_oh(in, oh, &oldTags, 0);
++
++ memcpy(old_name, oh->name, sizeof(oh->name));
++ memset(buffer, 0xFF, sizeof(yaffs_obj_header));
++ } else
++ memset(buffer, 0xFF, dev->data_bytes_per_chunk);
++
++ oh->type = in->variant_type;
++ oh->yst_mode = in->yst_mode;
++ oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
+
+-int yaffs_FlushFile(yaffs_Object *in, int updateTime)
+-{
+- int retVal;
+- if (in->dirty) {
+- yaffs_FlushFilesChunkCache(in);
+- if (updateTime) {
+ #ifdef CONFIG_YAFFS_WINCE
+- yfsd_WinFileTimeNow(in->win_mtime);
++ oh->win_atime[0] = in->win_atime[0];
++ oh->win_ctime[0] = in->win_ctime[0];
++ oh->win_mtime[0] = in->win_mtime[0];
++ oh->win_atime[1] = in->win_atime[1];
++ oh->win_ctime[1] = in->win_ctime[1];
++ oh->win_mtime[1] = in->win_mtime[1];
+ #else
++ oh->yst_uid = in->yst_uid;
++ oh->yst_gid = in->yst_gid;
++ oh->yst_atime = in->yst_atime;
++ oh->yst_mtime = in->yst_mtime;
++ oh->yst_ctime = in->yst_ctime;
++ oh->yst_rdev = in->yst_rdev;
++#endif
++ if (in->parent)
++ oh->parent_obj_id = in->parent->obj_id;
++ else
++ oh->parent_obj_id = 0;
++
++ if (name && *name) {
++ memset(oh->name, 0, sizeof(oh->name));
++ yaffs_load_oh_from_name(dev,oh->name,name);
++ } else if (prevChunkId > 0)
++ memcpy(oh->name, old_name, sizeof(oh->name));
++ else
++ memset(oh->name, 0, sizeof(oh->name));
+
+- in->yst_mtime = Y_CURRENT_TIME;
++ oh->is_shrink = is_shrink;
+
+-#endif
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* Should not happen */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ oh->file_size =
++ (oh->parent_obj_id == YAFFS_OBJECTID_DELETED
++ || oh->parent_obj_id ==
++ YAFFS_OBJECTID_UNLINKED) ? 0 : in->variant.
++ file_variant.file_size;
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ oh->equiv_id =
++ in->variant.hardlink_variant.equiv_id;
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ alias = in->variant.symlink_variant.alias;
++ if(!alias)
++ alias = _Y("no alias");
++ yaffs_strncpy(oh->alias,
++ alias,
++ YAFFS_MAX_ALIAS_LENGTH);
++ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
++ break;
+ }
+
+- retVal = (yaffs_UpdateObjectHeader(in, NULL, 0, 0, 0) >=
+- 0) ? YAFFS_OK : YAFFS_FAIL;
+- } else {
+- retVal = YAFFS_OK;
+- }
++ /* process any xattrib modifications */
++ if(xmod)
++ yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
+
+- return retVal;
+
+-}
++ /* Tags */
++ yaffs_init_tags(&newTags);
++ in->serial++;
++ newTags.chunk_id = 0;
++ newTags.obj_id = in->obj_id;
++ newTags.serial_number = in->serial;
+
+-static int yaffs_DoGenericObjectDeletion(yaffs_Object *in)
+-{
++ /* Add extra info for file header */
+
+- /* First off, invalidate the file's data in the cache, without flushing. */
+- yaffs_InvalidateWholeChunkCache(in);
++ newTags.extra_available = 1;
++ newTags.extra_parent_id = oh->parent_obj_id;
++ newTags.extra_length = oh->file_size;
++ newTags.extra_is_shrink = oh->is_shrink;
++ newTags.extra_equiv_id = oh->equiv_id;
++ newTags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
++ newTags.extra_obj_type = in->variant_type;
+
+- if (in->myDev->isYaffs2 && (in->parent != in->myDev->deletedDir)) {
+- /* Move to the unlinked directory so we have a record that it was deleted. */
+- yaffs_ChangeObjectName(in, in->myDev->deletedDir, _Y("deleted"), 0, 0);
++ yaffs_verify_oh(in, oh, &newTags, 1);
+
+- }
++ /* Create new chunk in NAND */
++ newChunkId =
++ yaffs_write_new_chunk(dev, buffer, &newTags,
++ (prevChunkId > 0) ? 1 : 0);
+
+- yaffs_RemoveObjectFromDirectory(in);
+- yaffs_DeleteChunk(in->myDev, in->hdrChunk, 1, __LINE__);
+- in->hdrChunk = 0;
++ if (newChunkId >= 0) {
+
+- yaffs_FreeObject(in);
+- return YAFFS_OK;
++ in->hdr_chunk = newChunkId;
+
+-}
++ if (prevChunkId > 0) {
++ yaffs_chunk_del(dev, prevChunkId, 1,
++ __LINE__);
++ }
+
+-/* yaffs_DeleteFile deletes the whole file data
+- * and the inode associated with the file.
+- * It does not delete the links associated with the file.
+- */
+-static int yaffs_UnlinkFileIfNeeded(yaffs_Object *in)
+-{
++ if (!yaffs_obj_cache_dirty(in))
++ in->dirty = 0;
+
+- int retVal;
+- int immediateDeletion = 0;
++ /* If this was a shrink, then mark the block that the chunk lives on */
++ if (is_shrink) {
++ bi = yaffs_get_block_info(in->my_dev,
++ newChunkId / in->my_dev->param.chunks_per_block);
++ bi->has_shrink_hdr = 1;
++ }
+
+-#ifdef __KERNEL__
+- if (!in->myInode)
+- immediateDeletion = 1;
+-#else
+- if (in->inUse <= 0)
+- immediateDeletion = 1;
+-#endif
++ }
++
++ retVal = newChunkId;
+
+- if (immediateDeletion) {
+- retVal =
+- yaffs_ChangeObjectName(in, in->myDev->deletedDir,
+- _Y("deleted"), 0, 0);
+- T(YAFFS_TRACE_TRACING,
+- (TSTR("yaffs: immediate deletion of file %d" TENDSTR),
+- in->objectId));
+- in->deleted = 1;
+- in->myDev->nDeletedFiles++;
+- if (1 || in->myDev->isYaffs2)
+- yaffs_ResizeFile(in, 0);
+- yaffs_SoftDeleteFile(in);
+- } else {
+- retVal =
+- yaffs_ChangeObjectName(in, in->myDev->unlinkedDir,
+- _Y("unlinked"), 0, 0);
+ }
+
++ if (buffer)
++ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+ return retVal;
+ }
+
+-int yaffs_DeleteFile(yaffs_Object *in)
+-{
+- int retVal = YAFFS_OK;
+- int deleted = in->deleted;
+-
+- yaffs_ResizeFile(in, 0);
+-
+- if (in->nDataChunks > 0) {
+- /* Use soft deletion if there is data in the file.
+- * That won't be the case if it has been resized to zero.
+- */
+- if (!in->unlinked)
+- retVal = yaffs_UnlinkFileIfNeeded(in);
+-
+- if (retVal == YAFFS_OK && in->unlinked && !in->deleted) {
+- in->deleted = 1;
+- deleted = 1;
+- in->myDev->nDeletedFiles++;
+- yaffs_SoftDeleteFile(in);
+- }
+- return deleted ? YAFFS_OK : YAFFS_FAIL;
+- } else {
+- /* The file has no data chunks so we toss it immediately */
+- yaffs_FreeTnode(in->myDev, in->variant.fileVariant.top);
+- in->variant.fileVariant.top = NULL;
+- yaffs_DoGenericObjectDeletion(in);
+-
+- return YAFFS_OK;
+- }
+-}
++/*------------------------ Short Operations Cache ----------------------------------------
++ * In many situations where there is no high-level buffering (e.g. WinCE) a lot of
++ * reads might be short sequential reads, and a lot of writes may be short
++ * sequential writes, e.g. scanning/writing a jpeg file.
++ * In these cases, a short read/write cache can provide a huge performance benefit
++ * with dumb-as-a-rock code.
++ * In Linux, the page cache provides read buffering and the short op cache provides write
++ * buffering.
++ *
++ * There are a limited number (~10) of cache chunks per device so that we don't
++ * need a very intelligent search.
++ */
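++
++/*
++ * Rough illustration (hypothetical workload): an application writing a file
++ * 512 bytes at a time on a device with 2048-byte chunks would otherwise force
++ * a read-modify-write of the same chunk for every call; with the short op
++ * cache those writes accumulate in a single cache entry and the chunk is only
++ * written out to NAND when the entry is flushed or evicted.
++ */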
+
+-static int yaffs_DeleteDirectory(yaffs_Object *in)
++static int yaffs_obj_cache_dirty(yaffs_obj_t *obj)
+ {
+- /* First check that the directory is empty. */
+- if (ylist_empty(&in->variant.directoryVariant.children))
+- return yaffs_DoGenericObjectDeletion(in);
++ yaffs_dev_t *dev = obj->my_dev;
++ int i;
++ yaffs_cache_t *cache;
++ int nCaches = obj->my_dev->param.n_caches;
+
+- return YAFFS_FAIL;
++ for (i = 0; i < nCaches; i++) {
++ cache = &dev->cache[i];
++ if (cache->object == obj &&
++ cache->dirty)
++ return 1;
++ }
+
++ return 0;
+ }
+
+-static int yaffs_DeleteSymLink(yaffs_Object *in)
+-{
+- YFREE(in->variant.symLinkVariant.alias);
+-
+- return yaffs_DoGenericObjectDeletion(in);
+-}
+
+-static int yaffs_DeleteHardLink(yaffs_Object *in)
++static void yaffs_flush_file_cache(yaffs_obj_t *obj)
+ {
+- /* remove this hardlink from the list assocaited with the equivalent
+- * object
+- */
+- ylist_del_init(&in->hardLinks);
+- return yaffs_DoGenericObjectDeletion(in);
+-}
++ yaffs_dev_t *dev = obj->my_dev;
++ int lowest = -99; /* Stop compiler whining. */
++ int i;
++ yaffs_cache_t *cache;
++ int chunkWritten = 0;
++ int nCaches = obj->my_dev->param.n_caches;
+
+-int yaffs_DeleteObject(yaffs_Object *obj)
+-{
+-int retVal = -1;
+- switch (obj->variantType) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- retVal = yaffs_DeleteFile(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- return yaffs_DeleteDirectory(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- retVal = yaffs_DeleteSymLink(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- retVal = yaffs_DeleteHardLink(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- retVal = yaffs_DoGenericObjectDeletion(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- retVal = 0;
+- break; /* should not happen. */
+- }
++ if (nCaches > 0) {
++ do {
++ cache = NULL;
+
+- return retVal;
+-}
++ /* Find the dirty cache for this object with the lowest chunk id. */
++ for (i = 0; i < nCaches; i++) {
++ if (dev->cache[i].object == obj &&
++ dev->cache[i].dirty) {
++ if (!cache
++ || dev->cache[i].chunk_id <
++ lowest) {
++ cache = &dev->cache[i];
++ lowest = cache->chunk_id;
++ }
++ }
++ }
+
+-static int yaffs_UnlinkWorker(yaffs_Object *obj)
+-{
++ if (cache && !cache->locked) {
++ /* Write it out and free it up */
+
+- int immediateDeletion = 0;
++ chunkWritten =
++ yaffs_wr_data_obj(cache->object,
++ cache->chunk_id,
++ cache->data,
++ cache->n_bytes,
++ 1);
++ cache->dirty = 0;
++ cache->object = NULL;
++ }
+
+-#ifdef __KERNEL__
+- if (!obj->myInode)
+- immediateDeletion = 1;
+-#else
+- if (obj->inUse <= 0)
+- immediateDeletion = 1;
+-#endif
++ } while (cache && chunkWritten > 0);
+
+- if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+- return yaffs_DeleteHardLink(obj);
+- } else if (!ylist_empty(&obj->hardLinks)) {
+- /* Curve ball: We're unlinking an object that has a hardlink.
+- *
+- * This problem arises because we are not strictly following
+- * The Linux link/inode model.
+- *
+- * We can't really delete the object.
+- * Instead, we do the following:
+- * - Select a hardlink.
+- * - Unhook it from the hard links
+- * - Unhook it from its parent directory (so that the rename can work)
+- * - Rename the object to the hardlink's name.
+- * - Delete the hardlink
+- */
++ if (cache) {
++ /* Hoosterman, disk full while writing cache out. */
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("yaffs tragedy: no space during cache write" TENDSTR)));
+
+- yaffs_Object *hl;
+- int retVal;
+- YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++ }
++ }
+
+- hl = ylist_entry(obj->hardLinks.next, yaffs_Object, hardLinks);
++}
+
+- ylist_del_init(&hl->hardLinks);
+- ylist_del_init(&hl->siblings);
++/* yaffs_flush_whole_cache(dev)
++ *
++ * Flush every dirty cached chunk on the device, one object at a time.
++ */
+
+- yaffs_GetObjectName(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
++void yaffs_flush_whole_cache(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj;
++ int nCaches = dev->param.n_caches;
++ int i;
+
+- retVal = yaffs_ChangeObjectName(obj, hl->parent, name, 0, 0);
++ /* Find a dirty object in the cache and flush it...
++ * until there are no further dirty objects.
++ */
++ do {
++ obj = NULL;
++ for (i = 0; i < nCaches && !obj; i++) {
++ if (dev->cache[i].object &&
++ dev->cache[i].dirty)
++ obj = dev->cache[i].object;
+
+- if (retVal == YAFFS_OK)
+- retVal = yaffs_DoGenericObjectDeletion(hl);
++ }
++ if (obj)
++ yaffs_flush_file_cache(obj);
+
+- return retVal;
++ } while (obj);
+
+- } else if (immediateDeletion) {
+- switch (obj->variantType) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- return yaffs_DeleteFile(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- return yaffs_DeleteDirectory(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- return yaffs_DeleteSymLink(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- return yaffs_DoGenericObjectDeletion(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- default:
+- return YAFFS_FAIL;
+- }
+- } else
+- return yaffs_ChangeObjectName(obj, obj->myDev->unlinkedDir,
+- _Y("unlinked"), 0, 0);
+ }
+
+
+-static int yaffs_UnlinkObject(yaffs_Object *obj)
++/* Grab us a cache chunk for use.
++ * First look for an empty one.
++ * Then look for the least recently used non-dirty one.
++ * Then look for the least recently used dirty one; flush it and look again.
++ */
++static yaffs_cache_t *yaffs_grab_chunk_worker(yaffs_dev_t *dev)
+ {
++ int i;
+
+- if (obj && obj->unlinkAllowed)
+- return yaffs_UnlinkWorker(obj);
+-
+- return YAFFS_FAIL;
+-
+-}
+-int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name)
+-{
+- yaffs_Object *obj;
++ if (dev->param.n_caches > 0) {
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (!dev->cache[i].object)
++ return &dev->cache[i];
++ }
++ }
+
+- obj = yaffs_FindObjectByName(dir, name);
+- return yaffs_UnlinkObject(obj);
++ return NULL;
+ }
+
+-/*----------------------- Initialisation Scanning ---------------------- */
+-
+-static void yaffs_HandleShadowedObject(yaffs_Device *dev, int objId,
+- int backwardScanning)
++static yaffs_cache_t *yaffs_grab_chunk_cache(yaffs_dev_t *dev)
+ {
+- yaffs_Object *obj;
++ yaffs_cache_t *cache;
++ yaffs_obj_t *theObj;
++ int usage;
++ int i;
++ int pushout;
+
+- if (!backwardScanning) {
+- /* Handle YAFFS1 forward scanning case
+- * For YAFFS1 we always do the deletion
+- */
++ if (dev->param.n_caches > 0) {
++ /* Try to find a non-dirty one... */
+
+- } else {
+- /* Handle YAFFS2 case (backward scanning)
+- * If the shadowed object exists then ignore.
+- */
+- if (yaffs_FindObjectByNumber(dev, objId))
+- return;
+- }
++ cache = yaffs_grab_chunk_worker(dev);
+
+- /* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc.
+- * We put it in unlinked dir to be cleaned up after the scanning
+- */
+- obj =
+- yaffs_FindOrCreateObjectByNumber(dev, objId,
+- YAFFS_OBJECT_TYPE_FILE);
+- if (!obj)
+- return;
+- yaffs_AddObjectToDirectory(dev->unlinkedDir, obj);
+- obj->variant.fileVariant.shrinkSize = 0;
+- obj->valid = 1; /* So that we don't read any other info for this file */
++ if (!cache) {
++ /* They were all dirty, so find the least recently used object and flush
++ * its cache, then look again.
++ * NB this is not very accurate: we actually flush the object that holds
++ * the least recently used page.
++ */
+
+-}
++ /* With locking we can't assume we can use entry zero */
+
+-typedef struct {
+- int seq;
+- int block;
+-} yaffs_BlockIndex;
++ theObj = NULL;
++ usage = -1;
++ cache = NULL;
++ pushout = -1;
+
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object &&
++ !dev->cache[i].locked &&
++ (dev->cache[i].last_use < usage || !cache)) {
++ usage = dev->cache[i].last_use;
++ theObj = dev->cache[i].object;
++ cache = &dev->cache[i];
++ pushout = i;
++ }
++ }
+
+-static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList)
+-{
+- yaffs_Object *hl;
+- yaffs_Object *in;
++ if (!cache || cache->dirty) {
++ /* Flush and try again */
++ yaffs_flush_file_cache(theObj);
++ cache = yaffs_grab_chunk_worker(dev);
++ }
+
+- while (hardList) {
+- hl = hardList;
+- hardList = (yaffs_Object *) (hardList->hardLinks.next);
++ }
++ return cache;
++ } else
++ return NULL;
+
+- in = yaffs_FindObjectByNumber(dev,
+- hl->variant.hardLinkVariant.
+- equivalentObjectId);
++}
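
As a hedged illustration of the replacement rule above (prefer an empty slot, otherwise evict the least recently used unlocked slot), here is a minimal standalone C sketch; the toy_cache type is hypothetical and the flush of a dirty victim that yaffs_grab_chunk_cache performs is omitted:

#include <stddef.h>
#include <stdio.h>

struct toy_cache {
	void *object;      /* NULL means the slot is empty */
	int locked;
	unsigned last_use;
};

/* Prefer an empty slot; otherwise evict the unlocked slot with the
 * smallest last_use stamp.  Returns NULL only if every slot is locked. */
static struct toy_cache *toy_pick_slot(struct toy_cache *c, int n)
{
	struct toy_cache *victim = NULL;
	int i;

	for (i = 0; i < n; i++)
		if (!c[i].object)
			return &c[i];

	for (i = 0; i < n; i++)
		if (!c[i].locked && (!victim || c[i].last_use < victim->last_use))
			victim = &c[i];

	return victim;
}

int main(void)
{
	int dummy;
	struct toy_cache c[3] = {
		{ &dummy, 0, 7 }, { &dummy, 1, 2 }, { &dummy, 0, 5 },
	};

	/* Slot 1 is older but locked, so slot 2 (last_use == 5) is evicted. */
	printf("evict slot %d\n", (int)(toy_pick_slot(c, 3) - c));
	return 0;
}
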
+
+- if (in) {
+- /* Add the hardlink pointers */
+- hl->variant.hardLinkVariant.equivalentObject = in;
+- ylist_add(&hl->hardLinks, &in->hardLinks);
+- } else {
+- /* Todo Need to report/handle this better.
+- * Got a problem... hardlink to a non-existant object
+- */
+- hl->variant.hardLinkVariant.equivalentObject = NULL;
+- YINIT_LIST_HEAD(&hl->hardLinks);
++/* Find a cached chunk */
++static yaffs_cache_t *yaffs_find_chunk_cache(const yaffs_obj_t *obj,
++ int chunk_id)
++{
++ yaffs_dev_t *dev = obj->my_dev;
++ int i;
++ if (dev->param.n_caches > 0) {
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object == obj &&
++ dev->cache[i].chunk_id == chunk_id) {
++ dev->cache_hits++;
+
++ return &dev->cache[i];
++ }
+ }
+ }
++ return NULL;
+ }
+
++/* Mark the chunk for the least recently used algorithm */
++static void yaffs_use_cache(yaffs_dev_t *dev, yaffs_cache_t *cache,
++ int isAWrite)
++{
++
++ if (dev->param.n_caches > 0) {
++ if (dev->cache_last_use < 0 || dev->cache_last_use > 100000000) {
++ /* Reset the cache usages */
++ int i;
++ for (i = 1; i < dev->param.n_caches; i++)
++ dev->cache[i].last_use = 0;
+
++ dev->cache_last_use = 0;
++ }
+
++ dev->cache_last_use++;
+
++ cache->last_use = dev->cache_last_use;
+
+-static int ybicmp(const void *a, const void *b)
+-{
+- register int aseq = ((yaffs_BlockIndex *)a)->seq;
+- register int bseq = ((yaffs_BlockIndex *)b)->seq;
+- register int ablock = ((yaffs_BlockIndex *)a)->block;
+- register int bblock = ((yaffs_BlockIndex *)b)->block;
+- if (aseq == bseq)
+- return ablock - bblock;
+- else
+- return aseq - bseq;
++ if (isAWrite)
++ cache->dirty = 1;
++ }
+ }
+
++/* Invalidate a single cache page.
++ * Do this when a whole page gets written,
++ * ie the short cache for this page is no longer valid.
++ */
++static void yaffs_invalidate_chunk_cache(yaffs_obj_t *object, int chunk_id)
++{
++ if (object->my_dev->param.n_caches > 0) {
++ yaffs_cache_t *cache = yaffs_find_chunk_cache(object, chunk_id);
+
+-struct yaffs_ShadowFixerStruct {
+- int objectId;
+- int shadowedId;
+- struct yaffs_ShadowFixerStruct *next;
+-};
+-
++ if (cache)
++ cache->object = NULL;
++ }
++}
+
+-static void yaffs_StripDeletedObjects(yaffs_Device *dev)
++/* Invalidate all the cache pages associated with this object
++ * Do this whenever the file is deleted or resized.
++ */
++static void yaffs_invalidate_whole_cache(yaffs_obj_t *in)
+ {
+- /*
+- * Sort out state of unlinked and deleted objects after scanning.
+- */
+- struct ylist_head *i;
+- struct ylist_head *n;
+- yaffs_Object *l;
++ int i;
++ yaffs_dev_t *dev = in->my_dev;
+
+- /* Soft delete all the unlinked files */
+- ylist_for_each_safe(i, n,
+- &dev->unlinkedDir->variant.directoryVariant.children) {
+- if (i) {
+- l = ylist_entry(i, yaffs_Object, siblings);
+- yaffs_DeleteObject(l);
++ if (dev->param.n_caches > 0) {
++ /* Invalidate it. */
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object == in)
++ dev->cache[i].object = NULL;
+ }
+ }
++}
+
+- ylist_for_each_safe(i, n,
+- &dev->deletedDir->variant.directoryVariant.children) {
+- if (i) {
+- l = ylist_entry(i, yaffs_Object, siblings);
+- yaffs_DeleteObject(l);
+- }
+- }
+
+-}
++/*--------------------- File read/write ------------------------
++ * Read and write have very similar structures.
++ * In general the read/write has three parts to it
++ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
++ * Some complete chunks
++ * An incomplete chunk to end off with
++ *
++ * Curve-balls: the first chunk might also be the last chunk.
++ */
+
+-static int yaffs_Scan(yaffs_Device *dev)
++int yaffs_file_rd(yaffs_obj_t *in, __u8 *buffer, loff_t offset,
++ int n_bytes)
+ {
+- yaffs_ExtendedTags tags;
+- int blk;
+- int blockIterator;
+- int startIterator;
+- int endIterator;
+- int result;
+
+ int chunk;
+- int c;
+- int deleted;
+- yaffs_BlockState state;
+- yaffs_Object *hardList = NULL;
+- yaffs_BlockInfo *bi;
+- __u32 sequenceNumber;
+- yaffs_ObjectHeader *oh;
+- yaffs_Object *in;
+- yaffs_Object *parent;
++ __u32 start;
++ int nToCopy;
++ int n = n_bytes;
++ int nDone = 0;
++ yaffs_cache_t *cache;
+
+- int alloc_failed = 0;
++ yaffs_dev_t *dev;
+
+- struct yaffs_ShadowFixerStruct *shadowFixerList = NULL;
++ dev = in->my_dev;
+
++ while (n > 0) {
++ /* chunk = offset / dev->data_bytes_per_chunk + 1; */
++ /* start = offset % dev->data_bytes_per_chunk; */
++ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
++ chunk++;
+
+- __u8 *chunkData;
++ /* OK now check for the curveball where the start and end are in
++ * the same chunk.
++ */
++ if ((start + n) < dev->data_bytes_per_chunk)
++ nToCopy = n;
++ else
++ nToCopy = dev->data_bytes_per_chunk - start;
+
++ cache = yaffs_find_chunk_cache(in, chunk);
+
++ /* If the chunk is already in the cache or it is less than a whole chunk
++ * or we're using inband tags then use the cache (if there is caching)
++ * else bypass the cache.
++ */
++ if (cache || nToCopy != dev->data_bytes_per_chunk || dev->param.inband_tags) {
++ if (dev->param.n_caches > 0) {
+
+- T(YAFFS_TRACE_SCAN,
+- (TSTR("yaffs_Scan starts intstartblk %d intendblk %d..." TENDSTR),
+- dev->internalStartBlock, dev->internalEndBlock));
++ /* If we can't find the data in the cache, then load it up. */
+
+- chunkData = yaffs_GetTempBuffer(dev, __LINE__);
++ if (!cache) {
++ cache = yaffs_grab_chunk_cache(in->my_dev);
++ cache->object = in;
++ cache->chunk_id = chunk;
++ cache->dirty = 0;
++ cache->locked = 0;
++ yaffs_rd_data_obj(in, chunk,
++ cache->
++ data);
++ cache->n_bytes = 0;
++ }
+
+- dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
++ yaffs_use_cache(dev, cache, 0);
+
+- /* Scan all the blocks to determine their state */
+- for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
+- bi = yaffs_GetBlockInfo(dev, blk);
+- yaffs_ClearChunkBits(dev, blk);
+- bi->pagesInUse = 0;
+- bi->softDeletions = 0;
++ cache->locked = 1;
+
+- yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber);
+
+- bi->blockState = state;
+- bi->sequenceNumber = sequenceNumber;
++ memcpy(buffer, &cache->data[start], nToCopy);
+
+- if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK)
+- bi->blockState = state = YAFFS_BLOCK_STATE_DEAD;
++ cache->locked = 0;
++ } else {
++ /* Read into the local buffer then copy..*/
+
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
+- state, sequenceNumber));
++ __u8 *localBuffer =
++ yaffs_get_temp_buffer(dev, __LINE__);
++ yaffs_rd_data_obj(in, chunk,
++ localBuffer);
+
+- if (state == YAFFS_BLOCK_STATE_DEAD) {
+- T(YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("block %d is bad" TENDSTR), blk));
+- } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("Block empty " TENDSTR)));
+- dev->nErasedBlocks++;
+- dev->nFreeChunks += dev->nChunksPerBlock;
+- }
+- }
++ memcpy(buffer, &localBuffer[start], nToCopy);
+
+- startIterator = dev->internalStartBlock;
+- endIterator = dev->internalEndBlock;
+
+- /* For each block.... */
+- for (blockIterator = startIterator; !alloc_failed && blockIterator <= endIterator;
+- blockIterator++) {
++ yaffs_release_temp_buffer(dev, localBuffer,
++ __LINE__);
++ }
+
+- YYIELD();
++ } else {
+
+- YYIELD();
++ /* A full chunk. Read directly into the supplied buffer. */
++ yaffs_rd_data_obj(in, chunk, buffer);
+
+- blk = blockIterator;
++ }
+
+- bi = yaffs_GetBlockInfo(dev, blk);
+- state = bi->blockState;
++ n -= nToCopy;
++ offset += nToCopy;
++ buffer += nToCopy;
++ nDone += nToCopy;
+
+- deleted = 0;
++ }
+
+- /* For each chunk in each block that needs scanning....*/
+- for (c = 0; !alloc_failed && c < dev->nChunksPerBlock &&
+- state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
+- /* Read the tags and decide what to do */
+- chunk = blk * dev->nChunksPerBlock + c;
++ return nDone;
++}
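
To make the head/whole-chunk/tail split performed by the loop above concrete, here is a hedged standalone C sketch; the 2048-byte chunk size and the split_request helper are hypothetical, not part of the patch:

#include <stdio.h>

#define CHUNK_BYTES 2048  /* hypothetical dev->data_bytes_per_chunk */

static void split_request(long offset, int n)
{
	while (n > 0) {
		int chunk = (int)(offset / CHUNK_BYTES) + 1;  /* 1-based, as in yaffs */
		int start = (int)(offset % CHUNK_BYTES);
		int n_copy = (start + n < CHUNK_BYTES) ? n : CHUNK_BYTES - start;

		printf("chunk %d: start %d, copy %d bytes\n", chunk, start, n_copy);
		n -= n_copy;
		offset += n_copy;
	}
}

int main(void)
{
	/* A 5000-byte read at offset 3000 becomes a 1096-byte head in chunk 2,
	 * all 2048 bytes of chunk 3, and a 1856-byte tail in chunk 4. */
	split_request(3000, 5000);
	return 0;
}
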
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
+- &tags);
++int yaffs_do_file_wr(yaffs_obj_t *in, const __u8 *buffer, loff_t offset,
++ int n_bytes, int write_trhrough)
++{
+
+- /* Let's have a good look at this chunk... */
++ int chunk;
++ __u32 start;
++ int nToCopy;
++ int n = n_bytes;
++ int nDone = 0;
++ int nToWriteBack;
++ int startOfWrite = offset;
++ int chunkWritten = 0;
++ __u32 n_bytesRead;
++ __u32 chunkStart;
+
+- if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED || tags.chunkDeleted) {
+- /* YAFFS1 only...
+- * A deleted chunk
+- */
+- deleted++;
+- dev->nFreeChunks++;
+- /*T((" %d %d deleted\n",blk,c)); */
+- } else if (!tags.chunkUsed) {
+- /* An unassigned chunk in the block
+- * This means that either the block is empty or
+- * this is the one being allocated from
+- */
++ yaffs_dev_t *dev;
+
+- if (c == 0) {
+- /* We're looking at the first chunk in the block so the block is unused */
+- state = YAFFS_BLOCK_STATE_EMPTY;
+- dev->nErasedBlocks++;
+- } else {
+- /* this is the block being allocated from */
+- T(YAFFS_TRACE_SCAN,
+- (TSTR
+- (" Allocating from %d %d" TENDSTR),
+- blk, c));
+- state = YAFFS_BLOCK_STATE_ALLOCATING;
+- dev->allocationBlock = blk;
+- dev->allocationPage = c;
+- dev->allocationBlockFinder = blk;
+- /* Set it to here to encourage the allocator to go forth from here. */
++ dev = in->my_dev;
+
+- }
++ while (n > 0 && chunkWritten >= 0) {
++ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+
+- dev->nFreeChunks += (dev->nChunksPerBlock - c);
+- } else if (tags.chunkId > 0) {
+- /* chunkId > 0 so it is a data chunk... */
+- unsigned int endpos;
+-
+- yaffs_SetChunkBit(dev, blk, c);
+- bi->pagesInUse++;
+-
+- in = yaffs_FindOrCreateObjectByNumber(dev,
+- tags.
+- objectId,
+- YAFFS_OBJECT_TYPE_FILE);
+- /* PutChunkIntoFile checks for a clash (two data chunks with
+- * the same chunkId).
+- */
++ if (chunk * dev->data_bytes_per_chunk + start != offset ||
++ start >= dev->data_bytes_per_chunk) {
++ T(YAFFS_TRACE_ERROR, (
++ TSTR("AddrToChunk of offset %d gives chunk %d start %d"
++ TENDSTR),
++ (int)offset, chunk, start));
++ }
++ chunk++; /* Convert to the 1-based chunk id within the file */
+
+- if (!in)
+- alloc_failed = 1;
++ /* OK now check for the curveball where the start and end are in
++ * the same chunk.
++ */
+
+- if (in) {
+- if (!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk, 1))
+- alloc_failed = 1;
+- }
++ if ((start + n) < dev->data_bytes_per_chunk) {
++ nToCopy = n;
+
+- endpos =
+- (tags.chunkId - 1) * dev->nDataBytesPerChunk +
+- tags.byteCount;
+- if (in &&
+- in->variantType == YAFFS_OBJECT_TYPE_FILE
+- && in->variant.fileVariant.scannedFileSize <
+- endpos) {
+- in->variant.fileVariant.
+- scannedFileSize = endpos;
+- if (!dev->useHeaderFileSize) {
+- in->variant.fileVariant.
+- fileSize =
+- in->variant.fileVariant.
+- scannedFileSize;
+- }
++ /* Now folks, to calculate how many bytes to write back....
++ * If we're overwriting and not writing to the end of the file then
++ * we need to write back as much as was there before.
++ */
+
+- }
+- /* T((" %d %d data %d %d\n",blk,c,tags.objectId,tags.chunkId)); */
+- } else {
+- /* chunkId == 0, so it is an ObjectHeader.
+- * Thus, we read in the object header and make the object
+- */
+- yaffs_SetChunkBit(dev, blk, c);
+- bi->pagesInUse++;
++ chunkStart = ((chunk - 1) * dev->data_bytes_per_chunk);
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk,
+- chunkData,
+- NULL);
+-
+- oh = (yaffs_ObjectHeader *) chunkData;
+-
+- in = yaffs_FindObjectByNumber(dev,
+- tags.objectId);
+- if (in && in->variantType != oh->type) {
+- /* This should not happen, but somehow
+- * Wev'e ended up with an objectId that has been reused but not yet
+- * deleted, and worse still it has changed type. Delete the old object.
+- */
++ if (chunkStart > in->variant.file_variant.file_size)
++ n_bytesRead = 0; /* Past end of file */
++ else
++ n_bytesRead = in->variant.file_variant.file_size - chunkStart;
+
+- yaffs_DeleteObject(in);
++ if (n_bytesRead > dev->data_bytes_per_chunk)
++ n_bytesRead = dev->data_bytes_per_chunk;
+
+- in = 0;
+- }
++ nToWriteBack =
++ (n_bytesRead >
++ (start + n)) ? n_bytesRead : (start + n);
+
+- in = yaffs_FindOrCreateObjectByNumber(dev,
+- tags.
+- objectId,
+- oh->type);
+-
+- if (!in)
+- alloc_failed = 1;
+-
+- if (in && oh->shadowsObject > 0) {
+-
+- struct yaffs_ShadowFixerStruct *fixer;
+- fixer = YMALLOC(sizeof(struct yaffs_ShadowFixerStruct));
+- if (fixer) {
+- fixer->next = shadowFixerList;
+- shadowFixerList = fixer;
+- fixer->objectId = tags.objectId;
+- fixer->shadowedId = oh->shadowsObject;
+- }
++ if (nToWriteBack < 0 || nToWriteBack > dev->data_bytes_per_chunk)
++ YBUG();
++
++ } else {
++ nToCopy = dev->data_bytes_per_chunk - start;
++ nToWriteBack = dev->data_bytes_per_chunk;
++ }
++
++ if (nToCopy != dev->data_bytes_per_chunk || dev->param.inband_tags) {
++ /* An incomplete start or end chunk (or maybe both start and end chunks),
++ * or we're using inband tags, so we want to use the cache buffers.
++ */
++ if (dev->param.n_caches > 0) {
++ yaffs_cache_t *cache;
++ /* If we can't find the data in the cache, then load the cache */
++ cache = yaffs_find_chunk_cache(in, chunk);
+
++ if (!cache
++ && yaffs_check_alloc_available(dev, 1)) {
++ cache = yaffs_grab_chunk_cache(dev);
++ cache->object = in;
++ cache->chunk_id = chunk;
++ cache->dirty = 0;
++ cache->locked = 0;
++ yaffs_rd_data_obj(in, chunk,
++ cache->data);
++ } else if (cache &&
++ !cache->dirty &&
++ !yaffs_check_alloc_available(dev, 1)) {
++ /* Drop the cache if it was a read cache item and
++ * no space check has been made for it.
++ */
++ cache = NULL;
+ }
+
+- if (in && in->valid) {
+- /* We have already filled this one. We have a duplicate and need to resolve it. */
++ if (cache) {
++ yaffs_use_cache(dev, cache, 1);
++ cache->locked = 1;
+
+- unsigned existingSerial = in->serial;
+- unsigned newSerial = tags.serialNumber;
+
+- if (((existingSerial + 1) & 3) == newSerial) {
+- /* Use new one - destroy the exisiting one */
+- yaffs_DeleteChunk(dev,
+- in->hdrChunk,
+- 1, __LINE__);
+- in->valid = 0;
+- } else {
+- /* Use existing - destroy this one. */
+- yaffs_DeleteChunk(dev, chunk, 1,
+- __LINE__);
++ memcpy(&cache->data[start], buffer,
++ nToCopy);
++
++
++ cache->locked = 0;
++ cache->n_bytes = nToWriteBack;
++
++ if (write_trhrough) {
++ chunkWritten =
++ yaffs_wr_data_obj
++ (cache->object,
++ cache->chunk_id,
++ cache->data, cache->n_bytes,
++ 1);
++ cache->dirty = 0;
+ }
++
++ } else {
++ chunkWritten = -1; /* fail the write */
+ }
++ } else {
++ /* An incomplete start or end chunk (or maybe both start and end chunk)
++ * Read into the local buffer then copy, then copy over and write back.
++ */
+
+- if (in && !in->valid &&
+- (tags.objectId == YAFFS_OBJECTID_ROOT ||
+- tags.objectId == YAFFS_OBJECTID_LOSTNFOUND)) {
+- /* We only load some info, don't fiddle with directory structure */
+- in->valid = 1;
+- in->variantType = oh->type;
++ __u8 *localBuffer =
++ yaffs_get_temp_buffer(dev, __LINE__);
+
+- in->yst_mode = oh->yst_mode;
+-#ifdef CONFIG_YAFFS_WINCE
+- in->win_atime[0] = oh->win_atime[0];
+- in->win_ctime[0] = oh->win_ctime[0];
+- in->win_mtime[0] = oh->win_mtime[0];
+- in->win_atime[1] = oh->win_atime[1];
+- in->win_ctime[1] = oh->win_ctime[1];
+- in->win_mtime[1] = oh->win_mtime[1];
+-#else
+- in->yst_uid = oh->yst_uid;
+- in->yst_gid = oh->yst_gid;
+- in->yst_atime = oh->yst_atime;
+- in->yst_mtime = oh->yst_mtime;
+- in->yst_ctime = oh->yst_ctime;
+- in->yst_rdev = oh->yst_rdev;
+-#endif
+- in->hdrChunk = chunk;
+- in->serial = tags.serialNumber;
++ yaffs_rd_data_obj(in, chunk,
++ localBuffer);
+
+- } else if (in && !in->valid) {
+- /* we need to load this info */
+
+- in->valid = 1;
+- in->variantType = oh->type;
+
+- in->yst_mode = oh->yst_mode;
+-#ifdef CONFIG_YAFFS_WINCE
+- in->win_atime[0] = oh->win_atime[0];
+- in->win_ctime[0] = oh->win_ctime[0];
+- in->win_mtime[0] = oh->win_mtime[0];
+- in->win_atime[1] = oh->win_atime[1];
+- in->win_ctime[1] = oh->win_ctime[1];
+- in->win_mtime[1] = oh->win_mtime[1];
+-#else
+- in->yst_uid = oh->yst_uid;
+- in->yst_gid = oh->yst_gid;
+- in->yst_atime = oh->yst_atime;
+- in->yst_mtime = oh->yst_mtime;
+- in->yst_ctime = oh->yst_ctime;
+- in->yst_rdev = oh->yst_rdev;
+-#endif
+- in->hdrChunk = chunk;
+- in->serial = tags.serialNumber;
++ memcpy(&localBuffer[start], buffer, nToCopy);
+
+- yaffs_SetObjectName(in, oh->name);
+- in->dirty = 0;
++ chunkWritten =
++ yaffs_wr_data_obj(in, chunk,
++ localBuffer,
++ nToWriteBack,
++ 0);
+
+- /* directory stuff...
+- * hook up to parent
+- */
++ yaffs_release_temp_buffer(dev, localBuffer,
++ __LINE__);
+
+- parent =
+- yaffs_FindOrCreateObjectByNumber
+- (dev, oh->parentObjectId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
+- if (!parent)
+- alloc_failed = 1;
+- if (parent && parent->variantType ==
+- YAFFS_OBJECT_TYPE_UNKNOWN) {
+- /* Set up as a directory */
+- parent->variantType =
+- YAFFS_OBJECT_TYPE_DIRECTORY;
+- YINIT_LIST_HEAD(&parent->variant.
+- directoryVariant.
+- children);
+- } else if (!parent || parent->variantType !=
+- YAFFS_OBJECT_TYPE_DIRECTORY) {
+- /* Hoosterman, another problem....
+- * We're trying to use a non-directory as a directory
+- */
++ }
+
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+- TENDSTR)));
+- parent = dev->lostNFoundDir;
+- }
++ } else {
++ /* A full chunk. Write directly from the supplied buffer. */
+
+- yaffs_AddObjectToDirectory(parent, in);
+
+- if (0 && (parent == dev->deletedDir ||
+- parent == dev->unlinkedDir)) {
+- in->deleted = 1; /* If it is unlinked at start up then it wants deleting */
+- dev->nDeletedFiles++;
+- }
+- /* Note re hardlinks.
+- * Since we might scan a hardlink before its equivalent object is scanned
+- * we put them all in a list.
+- * After scanning is complete, we should have all the objects, so we run through this
+- * list and fix up all the chains.
+- */
+
+- switch (in->variantType) {
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- /* Todo got a problem */
+- break;
+- case YAFFS_OBJECT_TYPE_FILE:
+- if (dev->useHeaderFileSize)
+-
+- in->variant.fileVariant.
+- fileSize =
+- oh->fileSize;
+-
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- in->variant.hardLinkVariant.
+- equivalentObjectId =
+- oh->equivalentObjectId;
+- in->hardLinks.next =
+- (struct ylist_head *)
+- hardList;
+- hardList = in;
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- /* Do nothing */
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- /* Do nothing */
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- in->variant.symLinkVariant.alias =
+- yaffs_CloneString(oh->alias);
+- if (!in->variant.symLinkVariant.alias)
+- alloc_failed = 1;
+- break;
+- }
++ chunkWritten =
++ yaffs_wr_data_obj(in, chunk, buffer,
++ dev->data_bytes_per_chunk,
++ 0);
+
+-/*
+- if (parent == dev->deletedDir) {
+- yaffs_DestroyObject(in);
+- bi->hasShrinkHeader = 1;
+- }
+-*/
+- }
+- }
++ /* Since we've overwritten the cached data, we better invalidate it. */
++ yaffs_invalidate_chunk_cache(in, chunk);
+ }
+
+- if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+- /* If we got this far while scanning, then the block is fully allocated.*/
+- state = YAFFS_BLOCK_STATE_FULL;
++ if (chunkWritten >= 0) {
++ n -= nToCopy;
++ offset += nToCopy;
++ buffer += nToCopy;
++ nDone += nToCopy;
+ }
+
+- bi->blockState = state;
++ }
+
+- /* Now let's see if it was dirty */
+- if (bi->pagesInUse == 0 &&
+- !bi->hasShrinkHeader &&
+- bi->blockState == YAFFS_BLOCK_STATE_FULL) {
+- yaffs_BlockBecameDirty(dev, blk);
+- }
++ /* Update file object */
+
+- }
++ if ((startOfWrite + nDone) > in->variant.file_variant.file_size)
++ in->variant.file_variant.file_size = (startOfWrite + nDone);
+
++ in->dirty = 1;
+
+- /* Ok, we've done all the scanning.
+- * Fix up the hard link chains.
+- * We should now have scanned all the objects, now it's time to add these
+- * hardlinks.
+- */
++ return nDone;
++}
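
A hedged worked example of the nToWriteBack calculation above; the 1500 bytes of existing chunk data and the 100-byte overwrite at chunk offset 200 are hypothetical numbers:

#include <stdio.h>

int main(void)
{
	int n_bytes_read = 1500;   /* hypothetical: valid data already in the chunk */
	int start = 200, n = 100;  /* overwrite 100 bytes at chunk offset 200 */

	/* Same max() as above: keep whatever valid data lies beyond the
	 * region being written, otherwise it would be lost on write-back. */
	int n_write_back = (n_bytes_read > start + n) ? n_bytes_read : (start + n);

	printf("write back %d bytes\n", n_write_back);  /* prints 1500 */
	return 0;
}
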
+
+- yaffs_HardlinkFixup(dev, hardList);
++int yaffs_wr_file(yaffs_obj_t *in, const __u8 *buffer, loff_t offset,
++ int n_bytes, int write_trhrough)
++{
++ yaffs2_handle_hole(in,offset);
++ return yaffs_do_file_wr(in,buffer,offset,n_bytes,write_trhrough);
++}
+
+- /* Fix up any shadowed objects */
+- {
+- struct yaffs_ShadowFixerStruct *fixer;
+- yaffs_Object *obj;
+-
+- while (shadowFixerList) {
+- fixer = shadowFixerList;
+- shadowFixerList = fixer->next;
+- /* Complete the rename transaction by deleting the shadowed object
+- * then setting the object header to unshadowed.
+- */
+- obj = yaffs_FindObjectByNumber(dev, fixer->shadowedId);
+- if (obj)
+- yaffs_DeleteObject(obj);
+
+- obj = yaffs_FindObjectByNumber(dev, fixer->objectId);
+
+- if (obj)
+- yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0);
++/* ---------------------- File resizing stuff ------------------ */
+
+- YFREE(fixer);
+- }
+- }
++static void yaffs_prune_chunks(yaffs_obj_t *in, int new_size)
++{
+
+- yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
++ yaffs_dev_t *dev = in->my_dev;
++ int oldFileSize = in->variant.file_variant.file_size;
+
+- if (alloc_failed)
+- return YAFFS_FAIL;
++ int lastDel = 1 + (oldFileSize - 1) / dev->data_bytes_per_chunk;
++
++ int startDel = 1 + (new_size + dev->data_bytes_per_chunk - 1) /
++ dev->data_bytes_per_chunk;
++ int i;
++ int chunk_id;
+
+- T(YAFFS_TRACE_SCAN, (TSTR("yaffs_Scan ends" TENDSTR)));
++ /* Delete backwards so that we don't end up with holes if
++ * power is lost part-way through the operation.
++ */
++ for (i = lastDel; i >= startDel; i--) {
++ /* NB this could be optimised somewhat,
++ * eg. could retrieve the tags and write them without
++ * using yaffs_chunk_del
++ */
+
++ chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
++ if (chunk_id > 0) {
++ if (chunk_id <
++ (dev->internal_start_block * dev->param.chunks_per_block)
++ || chunk_id >=
++ ((dev->internal_end_block +
++ 1) * dev->param.chunks_per_block)) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("Found daft chunk_id %d for %d" TENDSTR),
++ chunk_id, i));
++ } else {
++ in->n_data_chunks--;
++ yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
++ }
++ }
++ }
+
+- return YAFFS_OK;
+ }
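
The chunk range chosen by the startDel/lastDel arithmetic above can be checked with a hedged standalone example; the 2048-byte chunk size and the 10000-to-5000 byte shrink are hypothetical:

#include <stdio.h>

#define CHUNK_BYTES 2048  /* hypothetical geometry */

int main(void)
{
	int old_size = 10000, new_size = 5000;
	int last_del  = 1 + (old_size - 1) / CHUNK_BYTES;               /* == 5 */
	int start_del = 1 + (new_size + CHUNK_BYTES - 1) / CHUNK_BYTES; /* == 4 */

	/* Chunks are deleted backwards: 5, then 4.  Chunk 3 keeps the partial
	 * tail and is rewritten by yaffs_resize_file_down(). */
	printf("delete chunks %d..%d (backwards)\n", start_del, last_del);
	return 0;
}
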
+
+-static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in)
+-{
+- __u8 *chunkData;
+- yaffs_ObjectHeader *oh;
+- yaffs_Device *dev;
+- yaffs_ExtendedTags tags;
+- int result;
+- int alloc_failed = 0;
+
+- if (!in)
+- return;
++void yaffs_resize_file_down( yaffs_obj_t *obj, loff_t new_size)
++{
++ int newFullChunks;
++ __u32 new_sizeOfPartialChunk;
++ yaffs_dev_t *dev = obj->my_dev;
+
+- dev = in->myDev;
++ yaffs_addr_to_chunk(dev, new_size, &newFullChunks, &new_sizeOfPartialChunk);
+
+-#if 0
+- T(YAFFS_TRACE_SCAN, (TSTR("details for object %d %s loaded" TENDSTR),
+- in->objectId,
+- in->lazyLoaded ? "not yet" : "already"));
+-#endif
++ yaffs_prune_chunks(obj, new_size);
+
+- if (in->lazyLoaded && in->hdrChunk > 0) {
+- in->lazyLoaded = 0;
+- chunkData = yaffs_GetTempBuffer(dev, __LINE__);
++ if (new_sizeOfPartialChunk != 0) {
++ int lastChunk = 1 + newFullChunks;
++ __u8 *localBuffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, in->hdrChunk, chunkData, &tags);
+- oh = (yaffs_ObjectHeader *) chunkData;
++ /* Got to read and rewrite the last chunk with its new size and zero pad */
++ yaffs_rd_data_obj(obj, lastChunk, localBuffer);
++ memset(localBuffer + new_sizeOfPartialChunk, 0,
++ dev->data_bytes_per_chunk - new_sizeOfPartialChunk);
+
+- in->yst_mode = oh->yst_mode;
+-#ifdef CONFIG_YAFFS_WINCE
+- in->win_atime[0] = oh->win_atime[0];
+- in->win_ctime[0] = oh->win_ctime[0];
+- in->win_mtime[0] = oh->win_mtime[0];
+- in->win_atime[1] = oh->win_atime[1];
+- in->win_ctime[1] = oh->win_ctime[1];
+- in->win_mtime[1] = oh->win_mtime[1];
+-#else
+- in->yst_uid = oh->yst_uid;
+- in->yst_gid = oh->yst_gid;
+- in->yst_atime = oh->yst_atime;
+- in->yst_mtime = oh->yst_mtime;
+- in->yst_ctime = oh->yst_ctime;
+- in->yst_rdev = oh->yst_rdev;
++ yaffs_wr_data_obj(obj, lastChunk, localBuffer,
++ new_sizeOfPartialChunk, 1);
+
+-#endif
+- yaffs_SetObjectName(in, oh->name);
++ yaffs_release_temp_buffer(dev, localBuffer, __LINE__);
++ }
+
+- if (in->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
+- in->variant.symLinkVariant.alias =
+- yaffs_CloneString(oh->alias);
+- if (!in->variant.symLinkVariant.alias)
+- alloc_failed = 1; /* Not returned to caller */
+- }
++ obj->variant.file_variant.file_size = new_size;
+
+- yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
+- }
++ yaffs_prune_tree(dev, &obj->variant.file_variant);
+ }
+
+-static int yaffs_ScanBackwards(yaffs_Device *dev)
+-{
+- yaffs_ExtendedTags tags;
+- int blk;
+- int blockIterator;
+- int startIterator;
+- int endIterator;
+- int nBlocksToScan = 0;
+-
+- int chunk;
+- int result;
+- int c;
+- int deleted;
+- yaffs_BlockState state;
+- yaffs_Object *hardList = NULL;
+- yaffs_BlockInfo *bi;
+- __u32 sequenceNumber;
+- yaffs_ObjectHeader *oh;
+- yaffs_Object *in;
+- yaffs_Object *parent;
+- int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
+- int itsUnlinked;
+- __u8 *chunkData;
+
+- int fileSize;
+- int isShrink;
+- int foundChunksInBlock;
+- int equivalentObjectId;
+- int alloc_failed = 0;
++int yaffs_resize_file(yaffs_obj_t *in, loff_t new_size)
++{
++ yaffs_dev_t *dev = in->my_dev;
++ int oldFileSize = in->variant.file_variant.file_size;
+
++ yaffs_flush_file_cache(in);
++ yaffs_invalidate_whole_cache(in);
+
+- yaffs_BlockIndex *blockIndex = NULL;
+- int altBlockIndex = 0;
++ yaffs_check_gc(dev,0);
+
+- if (!dev->isYaffs2) {
+- T(YAFFS_TRACE_SCAN,
+- (TSTR("yaffs_ScanBackwards is only for YAFFS2!" TENDSTR)));
++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+- }
+
+- T(YAFFS_TRACE_SCAN,
+- (TSTR
+- ("yaffs_ScanBackwards starts intstartblk %d intendblk %d..."
+- TENDSTR), dev->internalStartBlock, dev->internalEndBlock));
++ if (new_size == oldFileSize)
++ return YAFFS_OK;
++
++ if(new_size > oldFileSize){
++ yaffs2_handle_hole(in,new_size);
++ in->variant.file_variant.file_size = new_size;
++ } else {
++ /* new_size < oldFileSize */
++ yaffs_resize_file_down(in, new_size);
++ }
+
++ /* Write a new object header to reflect the resize.
++ * to show we've shrunk the file, if need be.
++ * Do this only if the file is not in the deleted directories
++ * and is not shadowed.
++ */
++ if (in->parent &&
++ !in->is_shadowed &&
++ in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
++ in->parent->obj_id != YAFFS_OBJECTID_DELETED)
++ yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+
+- dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+- blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
++ return YAFFS_OK;
++}
+
+- if (!blockIndex) {
+- blockIndex = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockIndex));
+- altBlockIndex = 1;
+- }
++loff_t yaffs_get_file_size(yaffs_obj_t *obj)
++{
++ YCHAR *alias = NULL;
++ obj = yaffs_get_equivalent_obj(obj);
+
+- if (!blockIndex) {
+- T(YAFFS_TRACE_SCAN,
+- (TSTR("yaffs_Scan() could not allocate block index!" TENDSTR)));
+- return YAFFS_FAIL;
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ return obj->variant.file_variant.file_size;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ alias = obj->variant.symlink_variant.alias;
++ if(!alias)
++ return 0;
++ return yaffs_strnlen(alias,YAFFS_MAX_ALIAS_LENGTH);
++ default:
++ return 0;
+ }
++}
+
+- dev->blocksInCheckpoint = 0;
+-
+- chunkData = yaffs_GetTempBuffer(dev, __LINE__);
+-
+- /* Scan all the blocks to determine their state */
+- for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
+- bi = yaffs_GetBlockInfo(dev, blk);
+- yaffs_ClearChunkBits(dev, blk);
+- bi->pagesInUse = 0;
+- bi->softDeletions = 0;
+-
+- yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber);
+
+- bi->blockState = state;
+- bi->sequenceNumber = sequenceNumber;
+
+- if (bi->sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+- bi->blockState = state = YAFFS_BLOCK_STATE_CHECKPOINT;
+- if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK)
+- bi->blockState = state = YAFFS_BLOCK_STATE_DEAD;
++int yaffs_flush_file(yaffs_obj_t *in, int update_time, int data_sync)
++{
++ int retVal;
++ if (in->dirty) {
++ yaffs_flush_file_cache(in);
++ if(data_sync) /* Only sync data */
++ retVal=YAFFS_OK;
++ else {
++ if (update_time) {
++#ifdef CONFIG_YAFFS_WINCE
++ yfsd_win_file_time_now(in->win_mtime);
++#else
+
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
+- state, sequenceNumber));
++ in->yst_mtime = Y_CURRENT_TIME;
+
++#endif
++ }
+
+- if (state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+- dev->blocksInCheckpoint++;
++ retVal = (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >=
++ 0) ? YAFFS_OK : YAFFS_FAIL;
++ }
++ } else {
++ retVal = YAFFS_OK;
++ }
+
+- } else if (state == YAFFS_BLOCK_STATE_DEAD) {
+- T(YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("block %d is bad" TENDSTR), blk));
+- } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("Block empty " TENDSTR)));
+- dev->nErasedBlocks++;
+- dev->nFreeChunks += dev->nChunksPerBlock;
+- } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++ return retVal;
+
+- /* Determine the highest sequence number */
+- if (sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+- sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
++}
+
+- blockIndex[nBlocksToScan].seq = sequenceNumber;
+- blockIndex[nBlocksToScan].block = blk;
++static int yaffs_generic_obj_del(yaffs_obj_t *in)
++{
+
+- nBlocksToScan++;
++ /* First off, invalidate the file's data in the cache, without flushing. */
++ yaffs_invalidate_whole_cache(in);
+
+- if (sequenceNumber >= dev->sequenceNumber)
+- dev->sequenceNumber = sequenceNumber;
+- } else {
+- /* TODO: Nasty sequence number! */
+- T(YAFFS_TRACE_SCAN,
+- (TSTR
+- ("Block scanning block %d has bad sequence number %d"
+- TENDSTR), blk, sequenceNumber));
++ if (in->my_dev->param.is_yaffs2 && (in->parent != in->my_dev->del_dir)) {
++ /* Move to the deleted directory so we have a record that it was deleted. */
++ yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0, 0);
+
+- }
+- }
+ }
+
+- T(YAFFS_TRACE_SCAN,
+- (TSTR("%d blocks to be sorted..." TENDSTR), nBlocksToScan));
++ yaffs_remove_obj_from_dir(in);
++ yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
++ in->hdr_chunk = 0;
+
++ yaffs_free_obj(in);
++ return YAFFS_OK;
+
++}
+
+- YYIELD();
++/* yaffs_del_file deletes the whole file data
++ * and the inode associated with the file.
++ * It does not delete the links associated with the file.
++ */
++static int yaffs_unlink_file_if_needed(yaffs_obj_t *in)
++{
+
+- /* Sort the blocks */
+-#ifndef CONFIG_YAFFS_USE_OWN_SORT
+- {
+- /* Use qsort now. */
+- yaffs_qsort(blockIndex, nBlocksToScan, sizeof(yaffs_BlockIndex), ybicmp);
+- }
+-#else
+- {
+- /* Dungy old bubble sort... */
++ int retVal;
++ int immediateDeletion = 0;
++ yaffs_dev_t *dev = in->my_dev;
+
+- yaffs_BlockIndex temp;
+- int i;
+- int j;
++ if (!in->my_inode)
++ immediateDeletion = 1;
+
+- for (i = 0; i < nBlocksToScan; i++)
+- for (j = i + 1; j < nBlocksToScan; j++)
+- if (blockIndex[i].seq > blockIndex[j].seq) {
+- temp = blockIndex[j];
+- blockIndex[j] = blockIndex[i];
+- blockIndex[i] = temp;
+- }
++ if (immediateDeletion) {
++ retVal =
++ yaffs_change_obj_name(in, in->my_dev->del_dir,
++ _Y("deleted"), 0, 0);
++ T(YAFFS_TRACE_TRACING,
++ (TSTR("yaffs: immediate deletion of file %d" TENDSTR),
++ in->obj_id));
++ in->deleted = 1;
++ in->my_dev->n_deleted_files++;
++ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
++ yaffs_resize_file(in, 0);
++ yaffs_soft_del_file(in);
++ } else {
++ retVal =
++ yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
++ _Y("unlinked"), 0, 0);
+ }
+-#endif
+
+- YYIELD();
+
+- T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR)));
++ return retVal;
++}
+
+- /* Now scan the blocks looking at the data. */
+- startIterator = 0;
+- endIterator = nBlocksToScan - 1;
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan));
++int yaffs_del_file(yaffs_obj_t *in)
++{
++ int retVal = YAFFS_OK;
++ int deleted; /* Need to cache value on stack if in is freed */
++ yaffs_dev_t *dev = in->my_dev;
+
+- /* For each block.... backwards */
+- for (blockIterator = endIterator; !alloc_failed && blockIterator >= startIterator;
+- blockIterator--) {
+- /* Cooperative multitasking! This loop can run for so
+- long that watchdog timers expire. */
+- YYIELD();
++ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
++ yaffs_resize_file(in, 0);
+
+- /* get the block to scan in the correct order */
+- blk = blockIndex[blockIterator].block;
++ if (in->n_data_chunks > 0) {
++ /* Use soft deletion if there is data in the file.
++ * That won't be the case if it has been resized to zero.
++ */
++ if (!in->unlinked)
++ retVal = yaffs_unlink_file_if_needed(in);
+
+- bi = yaffs_GetBlockInfo(dev, blk);
++ deleted = in->deleted;
+
++ if (retVal == YAFFS_OK && in->unlinked && !in->deleted) {
++ in->deleted = 1;
++ deleted = 1;
++ in->my_dev->n_deleted_files++;
++ yaffs_soft_del_file(in);
++ }
++ return deleted ? YAFFS_OK : YAFFS_FAIL;
++ } else {
++ /* The file has no data chunks so we toss it immediately */
++ yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
++ in->variant.file_variant.top = NULL;
++ yaffs_generic_obj_del(in);
+
+- state = bi->blockState;
++ return YAFFS_OK;
++ }
++}
+
+- deleted = 0;
++static int yaffs_is_non_empty_dir(yaffs_obj_t *obj)
++{
++ return (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
++ !(ylist_empty(&obj->variant.dir_variant.children));
++}
+
+- /* For each chunk in each block that needs scanning.... */
+- foundChunksInBlock = 0;
+- for (c = dev->nChunksPerBlock - 1;
+- !alloc_failed && c >= 0 &&
+- (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+- state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
+- /* Scan backwards...
+- * Read the tags and decide what to do
+- */
++static int yaffs_del_dir(yaffs_obj_t *obj)
++{
++ /* First check that the directory is empty. */
++ if (yaffs_is_non_empty_dir(obj))
++ return YAFFS_FAIL;
+
+- chunk = blk * dev->nChunksPerBlock + c;
++ return yaffs_generic_obj_del(obj);
++}
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
+- &tags);
++static int yaffs_del_symlink(yaffs_obj_t *in)
++{
++ if(in->variant.symlink_variant.alias)
++ YFREE(in->variant.symlink_variant.alias);
++ in->variant.symlink_variant.alias=NULL;
+
+- /* Let's have a good look at this chunk... */
++ return yaffs_generic_obj_del(in);
++}
+
+- if (!tags.chunkUsed) {
+- /* An unassigned chunk in the block.
+- * If there are used chunks after this one, then
+- * it is a chunk that was skipped due to failing the erased
+- * check. Just skip it so that it can be deleted.
+- * But, more typically, We get here when this is an unallocated
+- * chunk and his means that either the block is empty or
+- * this is the one being allocated from
+- */
++static int yaffs_del_link(yaffs_obj_t *in)
++{
++ /* Remove this hardlink from the list associated with the equivalent
++ * object
++ */
++ ylist_del_init(&in->hard_links);
++ return yaffs_generic_obj_del(in);
++}
+
+- if (foundChunksInBlock) {
+- /* This is a chunk that was skipped due to failing the erased check */
+- } else if (c == 0) {
+- /* We're looking at the first chunk in the block so the block is unused */
+- state = YAFFS_BLOCK_STATE_EMPTY;
+- dev->nErasedBlocks++;
+- } else {
+- if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+- state == YAFFS_BLOCK_STATE_ALLOCATING) {
+- if (dev->sequenceNumber == bi->sequenceNumber) {
+- /* this is the block being allocated from */
+-
+- T(YAFFS_TRACE_SCAN,
+- (TSTR
+- (" Allocating from %d %d"
+- TENDSTR), blk, c));
+-
+- state = YAFFS_BLOCK_STATE_ALLOCATING;
+- dev->allocationBlock = blk;
+- dev->allocationPage = c;
+- dev->allocationBlockFinder = blk;
+- } else {
+- /* This is a partially written block that is not
+- * the current allocation block. This block must have
+- * had a write failure, so set up for retirement.
+- */
+-
+- /* bi->needsRetiring = 1; ??? TODO */
+- bi->gcPrioritise = 1;
+-
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("Partially written block %d detected" TENDSTR),
+- blk));
+- }
+- }
+- }
++int yaffs_del_obj(yaffs_obj_t *obj)
++{
++int retVal = -1;
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ retVal = yaffs_del_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ if(!ylist_empty(&obj->variant.dir_variant.dirty)){
++ T(YAFFS_TRACE_BACKGROUND, (TSTR("Remove object %d from dirty directories" TENDSTR),obj->obj_id));
++ ylist_del_init(&obj->variant.dir_variant.dirty);
++ }
++ return yaffs_del_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ retVal = yaffs_del_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ retVal = yaffs_del_link(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ retVal = yaffs_generic_obj_del(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ retVal = 0;
++ break; /* should not happen. */
++ }
+
+- dev->nFreeChunks++;
++ return retVal;
++}
+
+- } else if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED) {
+- T(YAFFS_TRACE_SCAN,
+- (TSTR(" Unfixed ECC in chunk(%d:%d), chunk ignored"TENDSTR),
+- blk, c));
+-
+- dev->nFreeChunks++;
+-
+- } else if (tags.chunkId > 0) {
+- /* chunkId > 0 so it is a data chunk... */
+- unsigned int endpos;
+- __u32 chunkBase =
+- (tags.chunkId - 1) * dev->nDataBytesPerChunk;
+-
+- foundChunksInBlock = 1;
+-
+-
+- yaffs_SetChunkBit(dev, blk, c);
+- bi->pagesInUse++;
+-
+- in = yaffs_FindOrCreateObjectByNumber(dev,
+- tags.
+- objectId,
+- YAFFS_OBJECT_TYPE_FILE);
+- if (!in) {
+- /* Out of memory */
+- alloc_failed = 1;
+- }
++static int yaffs_unlink_worker(yaffs_obj_t *obj)
++{
+
+- if (in &&
+- in->variantType == YAFFS_OBJECT_TYPE_FILE
+- && chunkBase <
+- in->variant.fileVariant.shrinkSize) {
+- /* This has not been invalidated by a resize */
+- if (!yaffs_PutChunkIntoFile(in, tags.chunkId,
+- chunk, -1)) {
+- alloc_failed = 1;
+- }
++ int immediateDeletion = 0;
+
+- /* File size is calculated by looking at the data chunks if we have not
+- * seen an object header yet. Stop this practice once we find an object header.
+- */
+- endpos =
+- (tags.chunkId -
+- 1) * dev->nDataBytesPerChunk +
+- tags.byteCount;
+-
+- if (!in->valid && /* have not got an object header yet */
+- in->variant.fileVariant.
+- scannedFileSize < endpos) {
+- in->variant.fileVariant.
+- scannedFileSize = endpos;
+- in->variant.fileVariant.
+- fileSize =
+- in->variant.fileVariant.
+- scannedFileSize;
+- }
++ if (!obj->my_inode)
++ immediateDeletion = 1;
+
+- } else if (in) {
+- /* This chunk has been invalidated by a resize, so delete */
+- yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
++ if(obj)
++ yaffs_update_parent(obj->parent);
+
+- }
+- } else {
+- /* chunkId == 0, so it is an ObjectHeader.
+- * Thus, we read in the object header and make the object
+- */
+- foundChunksInBlock = 1;
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
++ return yaffs_del_link(obj);
++ } else if (!ylist_empty(&obj->hard_links)) {
++ /* Curve ball: We're unlinking an object that has a hardlink.
++ *
++ * This problem arises because we are not strictly following
++ * The Linux link/inode model.
++ *
++ * We can't really delete the object.
++ * Instead, we do the following:
++ * - Select a hardlink.
++ * - Unhook it from the hard links
++ * - Move it from its parent directory (so that the rename can work)
++ * - Rename the object to the hardlink's name.
++ * - Delete the hardlink
++ */
+
+- yaffs_SetChunkBit(dev, blk, c);
+- bi->pagesInUse++;
++ yaffs_obj_t *hl;
++ yaffs_obj_t *parent;
++ int retVal;
++ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+- oh = NULL;
+- in = NULL;
++ hl = ylist_entry(obj->hard_links.next, yaffs_obj_t, hard_links);
+
+- if (tags.extraHeaderInfoAvailable) {
+- in = yaffs_FindOrCreateObjectByNumber
+- (dev, tags.objectId,
+- tags.extraObjectType);
+- if (!in)
+- alloc_failed = 1;
+- }
++ yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
++ parent = hl->parent;
+
+- if (!in ||
+-#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD
+- !in->valid ||
+-#endif
+- tags.extraShadows ||
+- (!in->valid &&
+- (tags.objectId == YAFFS_OBJECTID_ROOT ||
+- tags.objectId == YAFFS_OBJECTID_LOSTNFOUND))) {
+-
+- /* If we don't have valid info then we need to read the chunk
+- * TODO In future we can probably defer reading the chunk and
+- * living with invalid data until needed.
+- */
++ ylist_del_init(&hl->hard_links);
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev,
+- chunk,
+- chunkData,
+- NULL);
+-
+- oh = (yaffs_ObjectHeader *) chunkData;
+-
+- if (dev->inbandTags) {
+- /* Fix up the header if they got corrupted by inband tags */
+- oh->shadowsObject = oh->inbandShadowsObject;
+- oh->isShrink = oh->inbandIsShrink;
+- }
++ yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
+
+- if (!in) {
+- in = yaffs_FindOrCreateObjectByNumber(dev, tags.objectId, oh->type);
+- if (!in)
+- alloc_failed = 1;
+- }
++ retVal = yaffs_change_obj_name(obj,parent, name, 0, 0);
+
+- }
++ if (retVal == YAFFS_OK)
++ retVal = yaffs_generic_obj_del(hl);
+
+- if (!in) {
+- /* TODO Hoosterman we have a problem! */
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs tragedy: Could not make object for object %d at chunk %d during scan"
+- TENDSTR), tags.objectId, chunk));
+- continue;
+- }
++ return retVal;
+
+- if (in->valid) {
+- /* We have already filled this one.
+- * We have a duplicate that will be discarded, but
+- * we first have to suck out resize info if it is a file.
+- */
++ } else if (immediateDeletion) {
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ return yaffs_del_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ ylist_del_init(&obj->variant.dir_variant.dirty);
++ return yaffs_del_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ return yaffs_del_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ return yaffs_generic_obj_del(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ default:
++ return YAFFS_FAIL;
++ }
++ } else if(yaffs_is_non_empty_dir(obj))
++ return YAFFS_FAIL;
++ else
++ return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
++ _Y("unlinked"), 0, 0);
++}
+
+- if ((in->variantType == YAFFS_OBJECT_TYPE_FILE) &&
+- ((oh &&
+- oh->type == YAFFS_OBJECT_TYPE_FILE) ||
+- (tags.extraHeaderInfoAvailable &&
+- tags.extraObjectType == YAFFS_OBJECT_TYPE_FILE))) {
+- __u32 thisSize =
+- (oh) ? oh->fileSize : tags.
+- extraFileLength;
+- __u32 parentObjectId =
+- (oh) ? oh->
+- parentObjectId : tags.
+- extraParentObjectId;
+-
+-
+- isShrink =
+- (oh) ? oh->isShrink : tags.
+- extraIsShrinkHeader;
+
+- /* If it is deleted (unlinked at start also means deleted)
+- * we treat the file size as being zeroed at this point.
+- */
+- if (parentObjectId ==
+- YAFFS_OBJECTID_DELETED
+- || parentObjectId ==
+- YAFFS_OBJECTID_UNLINKED) {
+- thisSize = 0;
+- isShrink = 1;
+- }
++static int yaffs_unlink_obj(yaffs_obj_t *obj)
++{
+
+- if (isShrink &&
+- in->variant.fileVariant.
+- shrinkSize > thisSize) {
+- in->variant.fileVariant.
+- shrinkSize =
+- thisSize;
+- }
++ if (obj && obj->unlink_allowed)
++ return yaffs_unlink_worker(obj);
+
+- if (isShrink)
+- bi->hasShrinkHeader = 1;
++ return YAFFS_FAIL;
+
+- }
+- /* Use existing - destroy this one. */
+- yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
++}
++int yaffs_unlinker(yaffs_obj_t *dir, const YCHAR *name)
++{
++ yaffs_obj_t *obj;
+
+- }
++ obj = yaffs_find_by_name(dir, name);
++ return yaffs_unlink_obj(obj);
++}
+
+- if (!in->valid && in->variantType !=
+- (oh ? oh->type : tags.extraObjectType))
+- T(YAFFS_TRACE_ERROR, (
+- TSTR("yaffs tragedy: Bad object type, "
+- TCONT("%d != %d, for object %d at chunk ")
+- TCONT("%d during scan")
+- TENDSTR), oh ?
+- oh->type : tags.extraObjectType,
+- in->variantType, tags.objectId,
+- chunk));
+-
+- if (!in->valid &&
+- (tags.objectId == YAFFS_OBJECTID_ROOT ||
+- tags.objectId ==
+- YAFFS_OBJECTID_LOSTNFOUND)) {
+- /* We only load some info, don't fiddle with directory structure */
+- in->valid = 1;
++/*----------------------- Initialisation Scanning ---------------------- */
+
+- if (oh) {
+- in->variantType = oh->type;
++void yaffs_handle_shadowed_obj(yaffs_dev_t *dev, int obj_id,
++ int backward_scanning)
++{
++ yaffs_obj_t *obj;
+
+- in->yst_mode = oh->yst_mode;
+-#ifdef CONFIG_YAFFS_WINCE
+- in->win_atime[0] = oh->win_atime[0];
+- in->win_ctime[0] = oh->win_ctime[0];
+- in->win_mtime[0] = oh->win_mtime[0];
+- in->win_atime[1] = oh->win_atime[1];
+- in->win_ctime[1] = oh->win_ctime[1];
+- in->win_mtime[1] = oh->win_mtime[1];
+-#else
+- in->yst_uid = oh->yst_uid;
+- in->yst_gid = oh->yst_gid;
+- in->yst_atime = oh->yst_atime;
+- in->yst_mtime = oh->yst_mtime;
+- in->yst_ctime = oh->yst_ctime;
+- in->yst_rdev = oh->yst_rdev;
++ if (!backward_scanning) {
++ /* Handle YAFFS1 forward scanning case
++ * For YAFFS1 we always do the deletion
++ */
+
+-#endif
+- } else {
+- in->variantType = tags.extraObjectType;
+- in->lazyLoaded = 1;
+- }
++ } else {
++ /* Handle YAFFS2 case (backward scanning)
++ * If the shadowed object exists then ignore.
++ */
++ obj = yaffs_find_by_number(dev, obj_id);
++ if(obj)
++ return;
++ }
+
+- in->hdrChunk = chunk;
++ /* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc.
++ * We put it in unlinked dir to be cleaned up after the scanning
++ */
++ obj =
++ yaffs_find_or_create_by_number(dev, obj_id,
++ YAFFS_OBJECT_TYPE_FILE);
++ if (!obj)
++ return;
++ obj->is_shadowed = 1;
++ yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
++ obj->variant.file_variant.shrink_size = 0;
++ obj->valid = 1; /* So that we don't read any other info for this file */
+
+- } else if (!in->valid) {
+- /* we need to load this info */
++}
+
+- in->valid = 1;
+- in->hdrChunk = chunk;
+
+- if (oh) {
+- in->variantType = oh->type;
++void yaffs_link_fixup(yaffs_dev_t *dev, yaffs_obj_t *hard_list)
++{
++ yaffs_obj_t *hl;
++ yaffs_obj_t *in;
+
+- in->yst_mode = oh->yst_mode;
+-#ifdef CONFIG_YAFFS_WINCE
+- in->win_atime[0] = oh->win_atime[0];
+- in->win_ctime[0] = oh->win_ctime[0];
+- in->win_mtime[0] = oh->win_mtime[0];
+- in->win_atime[1] = oh->win_atime[1];
+- in->win_ctime[1] = oh->win_ctime[1];
+- in->win_mtime[1] = oh->win_mtime[1];
+-#else
+- in->yst_uid = oh->yst_uid;
+- in->yst_gid = oh->yst_gid;
+- in->yst_atime = oh->yst_atime;
+- in->yst_mtime = oh->yst_mtime;
+- in->yst_ctime = oh->yst_ctime;
+- in->yst_rdev = oh->yst_rdev;
+-#endif
++ while (hard_list) {
++ hl = hard_list;
++ hard_list = (yaffs_obj_t *) (hard_list->hard_links.next);
++
++ in = yaffs_find_by_number(dev,
++ hl->variant.hardlink_variant.
++ equiv_id);
+
+- if (oh->shadowsObject > 0)
+- yaffs_HandleShadowedObject(dev,
+- oh->
+- shadowsObject,
+- 1);
+-
+-
+- yaffs_SetObjectName(in, oh->name);
+- parent =
+- yaffs_FindOrCreateObjectByNumber
+- (dev, oh->parentObjectId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
+-
+- fileSize = oh->fileSize;
+- isShrink = oh->isShrink;
+- equivalentObjectId = oh->equivalentObjectId;
++ if (in) {
++ /* Add the hardlink pointers */
++ hl->variant.hardlink_variant.equiv_obj = in;
++ ylist_add(&hl->hard_links, &in->hard_links);
++ } else {
++ /* Todo Need to report/handle this better.
++ * Got a problem... hardlink to a non-existent object
++ */
++ hl->variant.hardlink_variant.equiv_obj = NULL;
++ YINIT_LIST_HEAD(&hl->hard_links);
+
+- } else {
+- in->variantType = tags.extraObjectType;
+- parent =
+- yaffs_FindOrCreateObjectByNumber
+- (dev, tags.extraParentObjectId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
+- fileSize = tags.extraFileLength;
+- isShrink = tags.extraIsShrinkHeader;
+- equivalentObjectId = tags.extraEquivalentObjectId;
+- in->lazyLoaded = 1;
++ }
++ }
++}
+
+- }
+- in->dirty = 0;
+
+- if (!parent)
+- alloc_failed = 1;
++static void yaffs_strip_deleted_objs(yaffs_dev_t *dev)
++{
++ /*
++ * Sort out state of unlinked and deleted objects after scanning.
++ */
++ struct ylist_head *i;
++ struct ylist_head *n;
++ yaffs_obj_t *l;
+
+- /* directory stuff...
+- * hook up to parent
+- */
++ if (dev->read_only)
++ return;
+
+- if (parent && parent->variantType ==
+- YAFFS_OBJECT_TYPE_UNKNOWN) {
+- /* Set up as a directory */
+- parent->variantType =
+- YAFFS_OBJECT_TYPE_DIRECTORY;
+- YINIT_LIST_HEAD(&parent->variant.
+- directoryVariant.
+- children);
+- } else if (!parent || parent->variantType !=
+- YAFFS_OBJECT_TYPE_DIRECTORY) {
+- /* Hoosterman, another problem....
+- * We're trying to use a non-directory as a directory
+- */
++ /* Soft delete all the unlinked files */
++ ylist_for_each_safe(i, n,
++ &dev->unlinked_dir->variant.dir_variant.children) {
++ if (i) {
++ l = ylist_entry(i, yaffs_obj_t, siblings);
++ yaffs_del_obj(l);
++ }
++ }
+
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+- TENDSTR)));
+- parent = dev->lostNFoundDir;
+- }
++ ylist_for_each_safe(i, n,
++ &dev->del_dir->variant.dir_variant.children) {
++ if (i) {
++ l = ylist_entry(i, yaffs_obj_t, siblings);
++ yaffs_del_obj(l);
++ }
++ }
+
+- yaffs_AddObjectToDirectory(parent, in);
++}
+
+- itsUnlinked = (parent == dev->deletedDir) ||
+- (parent == dev->unlinkedDir);
++/*
++ * This code iterates through all the objects making sure that they are rooted.
++ * Any unrooted objects are re-rooted in lost+found.
++ * An object needs to be in one of these places:
++ * - Directly under the deleted or unlinked directories
++ * - Directly or indirectly under root.
++ *
++ * Note:
++ * This code assumes that we don't ever change the current relationships between
++ * directories:
++ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
++ * lostNfound->parent == root_dir
++ *
++ * This fixes the problem where directories might have inadvertently been deleted
++ * leaving the object "hanging" without being rooted in the directory tree.
++ */
++
++static int yaffs_has_null_parent(yaffs_dev_t *dev, yaffs_obj_t *obj)
++{
++ return (obj == dev->del_dir ||
++ obj == dev->unlinked_dir||
++ obj == dev->root_dir);
++}
+
+- if (isShrink) {
+- /* Mark the block as having a shrinkHeader */
+- bi->hasShrinkHeader = 1;
+- }
++static void yaffs_fix_hanging_objs(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj;
++ yaffs_obj_t *parent;
++ int i;
++ struct ylist_head *lh;
++ struct ylist_head *n;
++ int depthLimit;
++ int hanging;
+
+- /* Note re hardlinks.
+- * Since we might scan a hardlink before its equivalent object is scanned
+- * we put them all in a list.
+- * After scanning is complete, we should have all the objects, so we run
+- * through this list and fix up all the chains.
+- */
++ if (dev->read_only)
++ return;
+
+- switch (in->variantType) {
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- /* Todo got a problem */
+- break;
+- case YAFFS_OBJECT_TYPE_FILE:
+-
+- if (in->variant.fileVariant.
+- scannedFileSize < fileSize) {
+- /* This covers the case where the file size is greater
+- * than where the data is
+- * This will happen if the file is resized to be larger
+- * than its current data extents.
+- */
+- in->variant.fileVariant.fileSize = fileSize;
+- in->variant.fileVariant.scannedFileSize =
+- in->variant.fileVariant.fileSize;
+- }
++ /* Iterate through the objects in each hash entry,
++ * looking at each object.
++ * Make sure it is rooted.
++ */
+
+- if (isShrink &&
+- in->variant.fileVariant.shrinkSize > fileSize) {
+- in->variant.fileVariant.shrinkSize = fileSize;
+- }
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ ylist_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
++ if (lh) {
++ obj = ylist_entry(lh, yaffs_obj_t, hash_link);
++ parent= obj->parent;
++
++ if(yaffs_has_null_parent(dev,obj)){
++ /* These directories are not hanging */
++ hanging = 0;
++ }
++ else if(!parent || parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ hanging = 1;
++ else if(yaffs_has_null_parent(dev,parent))
++ hanging = 0;
++ else {
++ /*
++ * Need to follow the parent chain to see if it is hanging.
++ */
++ hanging = 0;
++ depthLimit=100;
+
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- if (!itsUnlinked) {
+- in->variant.hardLinkVariant.equivalentObjectId =
+- equivalentObjectId;
+- in->hardLinks.next =
+- (struct ylist_head *) hardList;
+- hardList = in;
+- }
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- /* Do nothing */
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- /* Do nothing */
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- if (oh) {
+- in->variant.symLinkVariant.alias =
+- yaffs_CloneString(oh->alias);
+- if (!in->variant.symLinkVariant.alias)
+- alloc_failed = 1;
+- }
+- break;
++ while(parent != dev->root_dir &&
++ parent->parent &&
++ parent->parent->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ depthLimit > 0){
++ parent = parent->parent;
++ depthLimit--;
+ }
+-
++ if(parent != dev->root_dir)
++ hanging = 1;
++ }
++ if(hanging){
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("Hanging object %d moved to lost and found" TENDSTR),
++ obj->obj_id));
++ yaffs_add_obj_to_dir(dev->lost_n_found,obj);
+ }
+-
+ }
++ }
++ }
++}
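
The hanging-object test above amounts to walking the parent chain with a depth limit; here is a hedged standalone C sketch using a hypothetical node type rather than the yaffs structures:

#include <stdio.h>

struct node {
	struct node *parent;
};

/* Returns 1 if n cannot be traced back to root within 'limit' hops,
 * i.e. the chain ends in NULL or never reaches root. */
static int is_hanging(const struct node *n, const struct node *root, int limit)
{
	while (n && n != root && limit-- > 0)
		n = n->parent;
	return n != root;
}

int main(void)
{
	struct node root = { NULL };
	struct node dir = { &root };
	struct node orphan = { NULL };

	/* Prints "dir hanging: 0, orphan hanging: 1". */
	printf("dir hanging: %d, orphan hanging: %d\n",
	       is_hanging(&dir, &root, 100), is_hanging(&orphan, &root, 100));
	return 0;
}
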
+
+- } /* End of scanning for each chunk */
+
+- if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+- /* If we got this far while scanning, then the block is fully allocated. */
+- state = YAFFS_BLOCK_STATE_FULL;
+- }
++/*
++ * Delete directory contents for cleaning up lost and found.
++ */
++static void yaffs_del_dir_contents(yaffs_obj_t *dir)
++{
++ yaffs_obj_t *obj;
++ struct ylist_head *lh;
++ struct ylist_head *n;
+
+- bi->blockState = state;
++ if(dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ YBUG();
++
++ ylist_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
++ if (lh) {
++ obj = ylist_entry(lh, yaffs_obj_t, siblings);
++ if(obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
++ yaffs_del_dir_contents(obj);
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("Deleting lost_found object %d" TENDSTR),
++ obj->obj_id));
+
+- /* Now let's see if it was dirty */
+- if (bi->pagesInUse == 0 &&
+- !bi->hasShrinkHeader &&
+- bi->blockState == YAFFS_BLOCK_STATE_FULL) {
+- yaffs_BlockBecameDirty(dev, blk);
++ /* Need to use UnlinkObject since Delete would not handle
++ * hardlinked objects correctly.
++ */
++ yaffs_unlink_obj(obj);
+ }
+-
+ }
++
++}
+
+- if (altBlockIndex)
+- YFREE_ALT(blockIndex);
+- else
+- YFREE(blockIndex);
++static void yaffs_empty_l_n_f(yaffs_dev_t *dev)
++{
++ yaffs_del_dir_contents(dev->lost_n_found);
++}
+
+- /* Ok, we've done all the scanning.
+- * Fix up the hard link chains.
+- * We should now have scanned all the objects, now it's time to add these
+- * hardlinks.
+- */
+- yaffs_HardlinkFixup(dev, hardList);
++static void yaffs_check_obj_details_loaded(yaffs_obj_t *in)
++{
++ __u8 *chunkData;
++ yaffs_obj_header *oh;
++ yaffs_dev_t *dev;
++ yaffs_ext_tags tags;
++ int result;
++ int alloc_failed = 0;
+
++ if (!in)
++ return;
+
+- yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
++ dev = in->my_dev;
+
+- if (alloc_failed)
+- return YAFFS_FAIL;
++#if 0
++ T(YAFFS_TRACE_SCAN, (TSTR("details for object %d %s loaded" TENDSTR),
++ in->obj_id,
++ in->lazy_loaded ? "not yet" : "already"));
++#endif
+
+- T(YAFFS_TRACE_SCAN, (TSTR("yaffs_ScanBackwards ends" TENDSTR)));
++ if (in->lazy_loaded && in->hdr_chunk > 0) {
++ in->lazy_loaded = 0;
++ chunkData = yaffs_get_temp_buffer(dev, __LINE__);
+
+- return YAFFS_OK;
+-}
++ result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, chunkData, &tags);
++ oh = (yaffs_obj_header *) chunkData;
+
+-/*------------------------------ Directory Functions ----------------------------- */
++ in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++ in->win_atime[0] = oh->win_atime[0];
++ in->win_ctime[0] = oh->win_ctime[0];
++ in->win_mtime[0] = oh->win_mtime[0];
++ in->win_atime[1] = oh->win_atime[1];
++ in->win_ctime[1] = oh->win_ctime[1];
++ in->win_mtime[1] = oh->win_mtime[1];
++#else
++ in->yst_uid = oh->yst_uid;
++ in->yst_gid = oh->yst_gid;
++ in->yst_atime = oh->yst_atime;
++ in->yst_mtime = oh->yst_mtime;
++ in->yst_ctime = oh->yst_ctime;
++ in->yst_rdev = oh->yst_rdev;
+
+-static void yaffs_VerifyObjectInDirectory(yaffs_Object *obj)
+-{
+- struct ylist_head *lh;
+- yaffs_Object *listObj;
++#endif
++ yaffs_set_obj_name_from_oh(in, oh);
+
+- int count = 0;
++ if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
++ in->variant.symlink_variant.alias =
++ yaffs_clone_str(oh->alias);
++ if (!in->variant.symlink_variant.alias)
++ alloc_failed = 1; /* Not returned to caller */
++ }
+
+- if (!obj) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("No object to verify" TENDSTR)));
+- YBUG();
+- return;
++ yaffs_release_temp_buffer(dev, chunkData, __LINE__);
+ }
++}
+
+- if (yaffs_SkipVerification(obj->myDev))
+- return;
++/*------------------------------ Directory Functions ----------------------------- */
+
+- if (!obj->parent) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Object does not have parent" TENDSTR)));
+- YBUG();
++/*
++ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a new
++ * link (ie. name) is created or deleted in the directory.
++ *
++ * ie.
++ * create dir/a : update dir's mtime/ctime
++ * rm dir/a: update dir's mtime/ctime
++ * modify dir/a: don't update dir's mtime/ctime
++ *
++ * This can be handled immediately or deferred. Deferring helps reduce the number
++ * of updates when many files in a directory are changed within a brief period.
++ *
++ * If the directory updating is deferred then yaffs_update_dirty_dirs must be
++ * called periodically.
++ */
++
++static void yaffs_update_parent(yaffs_obj_t *obj)
++{
++ yaffs_dev_t *dev;
++ if(!obj)
+ return;
+- }
+-
+- if (obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Parent is not directory" TENDSTR)));
+- YBUG();
+- }
+-
+- /* Iterate through the objects in each hash entry */
++#ifndef CONFIG_YAFFS_WINCE
+
+- ylist_for_each(lh, &obj->parent->variant.directoryVariant.children) {
+- if (lh) {
+- listObj = ylist_entry(lh, yaffs_Object, siblings);
+- yaffs_VerifyObject(listObj);
+- if (obj == listObj)
+- count++;
++ dev = obj->my_dev;
++ obj->dirty = 1;
++ obj->yst_mtime = obj->yst_ctime = Y_CURRENT_TIME;
++ if(dev->param.defered_dir_update){
++ struct ylist_head *link = &obj->variant.dir_variant.dirty;
++
++ if(ylist_empty(link)){
++ ylist_add(link,&dev->dirty_dirs);
++ T(YAFFS_TRACE_BACKGROUND, (TSTR("Added object %d to dirty directories" TENDSTR),obj->obj_id));
+ }
+- }
+
+- if (count != 1) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory %d times" TENDSTR), count));
+- YBUG();
+- }
++ } else
++ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
++#endif
+ }
+
+-static void yaffs_VerifyDirectory(yaffs_Object *directory)
++void yaffs_update_dirty_dirs(yaffs_dev_t *dev)
+ {
+- struct ylist_head *lh;
+- yaffs_Object *listObj;
+-
+- if (!directory) {
+- YBUG();
+- return;
+- }
++ struct ylist_head *link;
++ yaffs_obj_t *obj;
++ yaffs_dir_s *dS;
++ yaffs_obj_variant *oV;
+
+- if (yaffs_SkipFullVerification(directory->myDev))
+- return;
++ T(YAFFS_TRACE_BACKGROUND, (TSTR("Update dirty directories" TENDSTR)));
+
+- if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Directory has wrong type: %d" TENDSTR), directory->variantType));
+- YBUG();
+- }
++ while(!ylist_empty(&dev->dirty_dirs)){
++ link = dev->dirty_dirs.next;
++ ylist_del_init(link);
++
++ dS=ylist_entry(link,yaffs_dir_s,dirty);
++ oV = ylist_entry(dS,yaffs_obj_variant,dir_variant);
++ obj = ylist_entry(oV,yaffs_obj_t,variant);
+
+- /* Iterate through the objects in each hash entry */
++ T(YAFFS_TRACE_BACKGROUND, (TSTR("Update directory %d" TENDSTR), obj->obj_id));
+
+- ylist_for_each(lh, &directory->variant.directoryVariant.children) {
+- if (lh) {
+- listObj = ylist_entry(lh, yaffs_Object, siblings);
+- if (listObj->parent != directory) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory list has wrong parent %p" TENDSTR), listObj->parent));
+- YBUG();
+- }
+- yaffs_VerifyObjectInDirectory(listObj);
+- }
++ if(obj->dirty)
++ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+ }
+
+-
+-static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj)
++static void yaffs_remove_obj_from_dir(yaffs_obj_t *obj)
+ {
+- yaffs_Device *dev = obj->myDev;
+- yaffs_Object *parent;
++ yaffs_dev_t *dev = obj->my_dev;
++ yaffs_obj_t *parent;
+
+- yaffs_VerifyObjectInDirectory(obj);
++ yaffs_verify_obj_in_dir(obj);
+ parent = obj->parent;
+
+- yaffs_VerifyDirectory(parent);
++ yaffs_verify_dir(parent);
+
+- if (dev && dev->removeObjectCallback)
+- dev->removeObjectCallback(obj);
++ if (dev && dev->param.remove_obj_fn)
++ dev->param.remove_obj_fn(obj);
+
+
+ ylist_del_init(&obj->siblings);
+ obj->parent = NULL;
+-
+- yaffs_VerifyDirectory(parent);
++
++ yaffs_verify_dir(parent);
+ }
+
+-
+-static void yaffs_AddObjectToDirectory(yaffs_Object *directory,
+- yaffs_Object *obj)
++void yaffs_add_obj_to_dir(yaffs_obj_t *directory,
++ yaffs_obj_t *obj)
+ {
+ if (!directory) {
+ T(YAFFS_TRACE_ALWAYS,
+@@ -6699,7 +4495,7 @@ static void yaffs_AddObjectToDirectory(y
+ YBUG();
+ return;
+ }
+- if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: Trying to add an object to a non-directory"
+@@ -6713,27 +4509,27 @@ static void yaffs_AddObjectToDirectory(y
+ }
+
+
+- yaffs_VerifyDirectory(directory);
++ yaffs_verify_dir(directory);
+
+- yaffs_RemoveObjectFromDirectory(obj);
++ yaffs_remove_obj_from_dir(obj);
+
+
+ /* Now add it */
+- ylist_add(&obj->siblings, &directory->variant.directoryVariant.children);
++ ylist_add(&obj->siblings, &directory->variant.dir_variant.children);
+ obj->parent = directory;
+
+- if (directory == obj->myDev->unlinkedDir
+- || directory == obj->myDev->deletedDir) {
++ if (directory == obj->my_dev->unlinked_dir
++ || directory == obj->my_dev->del_dir) {
+ obj->unlinked = 1;
+- obj->myDev->nUnlinkedFiles++;
+- obj->renameAllowed = 0;
++ obj->my_dev->n_unlinked_files++;
++ obj->rename_allowed = 0;
+ }
+
+- yaffs_VerifyDirectory(directory);
+- yaffs_VerifyObjectInDirectory(obj);
++ yaffs_verify_dir(directory);
++ yaffs_verify_obj_in_dir(obj);
+ }
+
+-yaffs_Object *yaffs_FindObjectByName(yaffs_Object *directory,
++yaffs_obj_t *yaffs_find_by_name(yaffs_obj_t *directory,
+ const YCHAR *name)
+ {
+ int sum;
+@@ -6741,7 +4537,7 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+ struct ylist_head *i;
+ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+
+- yaffs_Object *l;
++ yaffs_obj_t *l;
+
+ if (!name)
+ return NULL;
+@@ -6749,39 +4545,39 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+ if (!directory) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("tragedy: yaffs_FindObjectByName: null pointer directory"
++ ("tragedy: yaffs_find_by_name: null pointer directory"
+ TENDSTR)));
+ YBUG();
+ return NULL;
+ }
+- if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
++ ("tragedy: yaffs_find_by_name: non-directory" TENDSTR)));
+ YBUG();
+ }
+
+- sum = yaffs_CalcNameSum(name);
++ sum = yaffs_calc_name_sum(name);
+
+- ylist_for_each(i, &directory->variant.directoryVariant.children) {
++ ylist_for_each(i, &directory->variant.dir_variant.children) {
+ if (i) {
+- l = ylist_entry(i, yaffs_Object, siblings);
++ l = ylist_entry(i, yaffs_obj_t, siblings);
+
+ if (l->parent != directory)
+ YBUG();
+
+- yaffs_CheckObjectDetailsLoaded(l);
++ yaffs_check_obj_details_loaded(l);
+
+ /* Special case for lost-n-found */
+- if (l->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
++ if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ if (yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME) == 0)
+ return l;
+- } else if (yaffs_SumCompare(l->sum, sum) || l->hdrChunk <= 0) {
++ } else if (yaffs_sum_cmp(l->sum, sum) || l->hdr_chunk <= 0) {
+ /* LostnFound chunk called Objxxx
+ * Do a real check
+ */
+- yaffs_GetObjectName(l, buffer,
+- YAFFS_MAX_NAME_LENGTH);
++ yaffs_get_obj_name(l, buffer,
++ YAFFS_MAX_NAME_LENGTH + 1);
+ if (yaffs_strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH) == 0)
+ return l;
+ }
+@@ -6793,31 +4589,31 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+
+
+ #if 0
+-int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir,
+- int (*fn) (yaffs_Object *))
++int yaffs_ApplyToDirectoryChildren(yaffs_obj_t *the_dir,
++ int (*fn) (yaffs_obj_t *))
+ {
+ struct ylist_head *i;
+- yaffs_Object *l;
++ yaffs_obj_t *l;
+
+- if (!theDir) {
++ if (!the_dir) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("tragedy: yaffs_FindObjectByName: null pointer directory"
++ ("tragedy: yaffs_find_by_name: null pointer directory"
+ TENDSTR)));
+ YBUG();
+ return YAFFS_FAIL;
+ }
+- if (theDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ if (the_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
++ ("tragedy: yaffs_find_by_name: non-directory" TENDSTR)));
+ YBUG();
+ return YAFFS_FAIL;
+ }
+
+- ylist_for_each(i, &theDir->variant.directoryVariant.children) {
++ ylist_for_each(i, &the_dir->variant.dir_variant.children) {
+ if (i) {
+- l = ylist_entry(i, yaffs_Object, siblings);
++ l = ylist_entry(i, yaffs_obj_t, siblings);
+ if (l && !fn(l))
+ return YAFFS_FAIL;
+ }
+@@ -6832,82 +4628,175 @@ int yaffs_ApplyToDirectoryChildren(yaffs
+ * actual object.
+ */
+
+-yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj)
++yaffs_obj_t *yaffs_get_equivalent_obj(yaffs_obj_t *obj)
+ {
+- if (obj && obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
++ if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ /* We want the object id of the equivalent object, not this one */
+- obj = obj->variant.hardLinkVariant.equivalentObject;
+- yaffs_CheckObjectDetailsLoaded(obj);
++ obj = obj->variant.hardlink_variant.equiv_obj;
++ yaffs_check_obj_details_loaded(obj);
+ }
+ return obj;
+ }
+
+-int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize)
+-{
+- memset(name, 0, buffSize * sizeof(YCHAR));
+-
+- yaffs_CheckObjectDetailsLoaded(obj);
++/*
++ * A note or two on object names.
++ * * If the object name is missing, we then make one up in the form objnnn
++ *
++ * * ASCII names are stored in the object header's name field from byte zero
++ * * Unicode names are historically stored starting from byte zero.
++ *
++ * Then there are automatic Unicode names...
++ * The purpose of these is to save names in a way that can be read as
++ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
++ * system to share files.
++ *
++ * These automatic Unicode names are stored slightly differently...
++ * - If the name can fit in the ASCII character space then they are saved as
++ * ascii names as per above.
++ * - If the name needs Unicode then the name is saved in Unicode
++ * starting at oh->name[1].
+
+- if (obj->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
+- yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffSize - 1);
+- } else if (obj->hdrChunk <= 0) {
++ */
++static void yaffs_fix_null_name(yaffs_obj_t * obj,YCHAR * name, int buffer_size)
++{
++ /* Create an object name if we could not find one. */
++ if(yaffs_strnlen(name,YAFFS_MAX_NAME_LENGTH) == 0){
+ YCHAR locName[20];
+ YCHAR numString[20];
+ YCHAR *x = &numString[19];
+- unsigned v = obj->objectId;
++ unsigned v = obj->obj_id;
+ numString[19] = 0;
+- while (v > 0) {
++ while(v>0){
+ x--;
+ *x = '0' + (v % 10);
+ v /= 10;
+ }
+ /* make up a name */
+ yaffs_strcpy(locName, YAFFS_LOSTNFOUND_PREFIX);
+- yaffs_strcat(locName, x);
+- yaffs_strncpy(name, locName, buffSize - 1);
++ yaffs_strcat(locName,x);
++ yaffs_strncpy(name, locName, buffer_size - 1);
++ }
++}
++
++static void yaffs_load_name_from_oh(yaffs_dev_t *dev,YCHAR *name, const YCHAR *ohName, int bufferSize)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ if(dev->param.auto_unicode){
++ if(*ohName){
++ /* It is an ASCII name, so do an ASCII to unicode conversion */
++ const char *asciiOhName = (const char *)ohName;
++ int n = bufferSize - 1;
++ while(n > 0 && *asciiOhName){
++ *name = *asciiOhName;
++ name++;
++ asciiOhName++;
++ n--;
++ }
++ } else
++ yaffs_strncpy(name,ohName+1, bufferSize -1);
++ } else
++#endif
++ yaffs_strncpy(name, ohName, bufferSize - 1);
++}
++
++
++static void yaffs_load_oh_from_name(yaffs_dev_t *dev, YCHAR *ohName, const YCHAR *name)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++
++ int isAscii;
++ YCHAR *w;
++
++ if(dev->param.auto_unicode){
++
++ isAscii = 1;
++ w = name;
++
++		/* Figure out if the name will fit in the ASCII character set */
++ while(isAscii && *w){
++ if((*w) & 0xff00)
++ isAscii = 0;
++ w++;
++ }
+
++ if(isAscii){
++ /* It is an ASCII name, so do a unicode to ascii conversion */
++ char *asciiOhName = (char *)ohName;
++ int n = YAFFS_MAX_NAME_LENGTH - 1;
++ while(n > 0 && *name){
++ *asciiOhName= *name;
++ name++;
++ asciiOhName++;
++ n--;
++ }
++ } else{
++ /* It is a unicode name, so save starting at the second YCHAR */
++ *ohName = 0;
++ yaffs_strncpy(ohName+1,name, YAFFS_MAX_NAME_LENGTH -2);
++ }
+ }
++ else
++#endif
++ yaffs_strncpy(ohName,name, YAFFS_MAX_NAME_LENGTH - 1);
++
++}
++
++int yaffs_get_obj_name(yaffs_obj_t * obj, YCHAR * name, int buffer_size)
++{
++ memset(name, 0, buffer_size * sizeof(YCHAR));
++
++ yaffs_check_obj_details_loaded(obj);
++
++ if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
++ yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
++ }
+ #ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+- else if (obj->shortName[0])
+- yaffs_strcpy(name, obj->shortName);
++ else if (obj->short_name[0]) {
++ yaffs_strcpy(name, obj->short_name);
++ }
+ #endif
+- else {
++ else if(obj->hdr_chunk > 0) {
+ int result;
+- __u8 *buffer = yaffs_GetTempBuffer(obj->myDev, __LINE__);
++ __u8 *buffer = yaffs_get_temp_buffer(obj->my_dev, __LINE__);
+
+- yaffs_ObjectHeader *oh = (yaffs_ObjectHeader *) buffer;
++ yaffs_obj_header *oh = (yaffs_obj_header *) buffer;
+
+- memset(buffer, 0, obj->myDev->nDataBytesPerChunk);
++ memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
+
+- if (obj->hdrChunk > 0) {
+- result = yaffs_ReadChunkWithTagsFromNAND(obj->myDev,
+- obj->hdrChunk, buffer,
++ if (obj->hdr_chunk > 0) {
++ result = yaffs_rd_chunk_tags_nand(obj->my_dev,
++ obj->hdr_chunk, buffer,
+ NULL);
+ }
+- yaffs_strncpy(name, oh->name, buffSize - 1);
++ yaffs_load_name_from_oh(obj->my_dev,name,oh->name,buffer_size);
+
+- yaffs_ReleaseTempBuffer(obj->myDev, buffer, __LINE__);
++ yaffs_release_temp_buffer(obj->my_dev, buffer, __LINE__);
+ }
+
+- return yaffs_strlen(name);
++ yaffs_fix_null_name(obj,name,buffer_size);
++
++ return yaffs_strnlen(name,YAFFS_MAX_NAME_LENGTH);
+ }
+
+-int yaffs_GetObjectFileLength(yaffs_Object *obj)
++
++int yaffs_get_obj_length(yaffs_obj_t *obj)
+ {
+ /* Dereference any hard linking */
+- obj = yaffs_GetEquivalentObject(obj);
++ obj = yaffs_get_equivalent_obj(obj);
+
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+- return obj->variant.fileVariant.fileSize;
+- if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK)
+- return yaffs_strlen(obj->variant.symLinkVariant.alias);
+- else {
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ return obj->variant.file_variant.file_size;
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK){
++ if(!obj->variant.symlink_variant.alias)
++ return 0;
++ return yaffs_strnlen(obj->variant.symlink_variant.alias,YAFFS_MAX_ALIAS_LENGTH);
++ } else {
+ /* Only a directory should drop through to here */
+- return obj->myDev->nDataBytesPerChunk;
++ return obj->my_dev->data_bytes_per_chunk;
+ }
+ }
+
+-int yaffs_GetObjectLinkCount(yaffs_Object *obj)
++int yaffs_get_obj_link_count(yaffs_obj_t *obj)
+ {
+ int count = 0;
+ struct ylist_head *i;
+@@ -6915,24 +4804,24 @@ int yaffs_GetObjectLinkCount(yaffs_Objec
+ if (!obj->unlinked)
+ count++; /* the object itself */
+
+- ylist_for_each(i, &obj->hardLinks)
++ ylist_for_each(i, &obj->hard_links)
+ count++; /* add the hard links; */
+
+ return count;
+ }
+
+-int yaffs_GetObjectInode(yaffs_Object *obj)
++int yaffs_get_obj_inode(yaffs_obj_t *obj)
+ {
+- obj = yaffs_GetEquivalentObject(obj);
++ obj = yaffs_get_equivalent_obj(obj);
+
+- return obj->objectId;
++ return obj->obj_id;
+ }
+
+-unsigned yaffs_GetObjectType(yaffs_Object *obj)
++unsigned yaffs_get_obj_type(yaffs_obj_t *obj)
+ {
+- obj = yaffs_GetEquivalentObject(obj);
++ obj = yaffs_get_equivalent_obj(obj);
+
+- switch (obj->variantType) {
++ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return DT_REG;
+ break;
+@@ -6960,18 +4849,18 @@ unsigned yaffs_GetObjectType(yaffs_Objec
+ }
+ }
+
+-YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj)
++YCHAR *yaffs_get_symlink_alias(yaffs_obj_t *obj)
+ {
+- obj = yaffs_GetEquivalentObject(obj);
+- if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK)
+- return yaffs_CloneString(obj->variant.symLinkVariant.alias);
++ obj = yaffs_get_equivalent_obj(obj);
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
++ return yaffs_clone_str(obj->variant.symlink_variant.alias);
+ else
+- return yaffs_CloneString(_Y(""));
++ return yaffs_clone_str(_Y(""));
+ }
+
+ #ifndef CONFIG_YAFFS_WINCE
+
+-int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr)
++int yaffs_set_attribs(yaffs_obj_t *obj, struct iattr *attr)
+ {
+ unsigned int valid = attr->ia_valid;
+
+@@ -6990,14 +4879,14 @@ int yaffs_SetAttributes(yaffs_Object *ob
+ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+ if (valid & ATTR_SIZE)
+- yaffs_ResizeFile(obj, attr->ia_size);
++ yaffs_resize_file(obj, attr->ia_size);
+
+- yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0);
++ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ return YAFFS_OK;
+
+ }
+-int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr)
++int yaffs_get_attribs(yaffs_obj_t *obj, struct iattr *attr)
+ {
+ unsigned int valid = 0;
+
+@@ -7015,7 +4904,7 @@ int yaffs_GetAttributes(yaffs_Object *ob
+ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+ valid |= ATTR_MTIME;
+
+- attr->ia_size = yaffs_GetFileSize(obj);
++ attr->ia_size = yaffs_get_file_size(obj);
+ valid |= ATTR_SIZE;
+
+ attr->ia_valid = valid;
+@@ -7025,20 +4914,137 @@ int yaffs_GetAttributes(yaffs_Object *ob
+
+ #endif
+
++
++static int yaffs_do_xattrib_mod(yaffs_obj_t *obj, int set, const YCHAR *name, const void *value, int size, int flags)
++{
++ yaffs_xattr_mod xmod;
++
++ int result;
++
++ xmod.set = set;
++ xmod.name = name;
++ xmod.data = value;
++ xmod.size = size;
++ xmod.flags = flags;
++ xmod.result = -ENOSPC;
++
++ result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
++
++ if(result > 0)
++ return xmod.result;
++ else
++ return -ENOSPC;
++}
++
++static int yaffs_apply_xattrib_mod(yaffs_obj_t *obj, char *buffer, yaffs_xattr_mod *xmod)
++{
++ int retval = 0;
++ int x_offs = sizeof(yaffs_obj_header);
++ yaffs_dev_t *dev = obj->my_dev;
++ int x_size = dev->data_bytes_per_chunk - sizeof(yaffs_obj_header);
++
++ char * x_buffer = buffer + x_offs;
++
++ if(xmod->set)
++ retval = nval_set(x_buffer, x_size, xmod->name, xmod->data, xmod->size, xmod->flags);
++ else
++ retval = nval_del(x_buffer, x_size, xmod->name);
++
++ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
++ obj->xattr_known = 1;
++
++ xmod->result = retval;
++
++ return retval;
++}
++
++static int yaffs_do_xattrib_fetch(yaffs_obj_t *obj, const YCHAR *name, void *value, int size)
++{
++ char *buffer = NULL;
++ int result;
++ yaffs_ext_tags tags;
++ yaffs_dev_t *dev = obj->my_dev;
++ int x_offs = sizeof(yaffs_obj_header);
++ int x_size = dev->data_bytes_per_chunk - sizeof(yaffs_obj_header);
++
++ char * x_buffer;
++
++ int retval = 0;
++
++ if(obj->hdr_chunk < 1)
++ return -ENODATA;
++
++ /* If we know that the object has no xattribs then don't do all the
++ * reading and parsing.
++ */
++ if(obj->xattr_known && !obj->has_xattr){
++ if(name)
++ return -ENODATA;
++ else
++ return 0;
++ }
++
++ buffer = (char *) yaffs_get_temp_buffer(dev, __LINE__);
++ if(!buffer)
++ return -ENOMEM;
++
++ result = yaffs_rd_chunk_tags_nand(dev,obj->hdr_chunk, (__u8 *)buffer, &tags);
++
++ if(result != YAFFS_OK)
++ retval = -ENOENT;
++ else{
++ x_buffer = buffer + x_offs;
++
++ if (!obj->xattr_known){
++ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
++ obj->xattr_known = 1;
++ }
++
++ if(name)
++ retval = nval_get(x_buffer, x_size, name, value, size);
++ else
++ retval = nval_list(x_buffer, x_size, value,size);
++ }
++ yaffs_release_temp_buffer(dev,(__u8 *)buffer,__LINE__);
++ return retval;
++}
++
++int yaffs_set_xattrib(yaffs_obj_t *obj, const YCHAR *name, const void * value, int size, int flags)
++{
++ return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
++}
++
++int yaffs_remove_xattrib(yaffs_obj_t *obj, const YCHAR *name)
++{
++ return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
++}
++
++int yaffs_get_xattrib(yaffs_obj_t *obj, const YCHAR *name, void *value, int size)
++{
++ return yaffs_do_xattrib_fetch(obj, name, value, size);
++}
++
++int yaffs_list_xattrib(yaffs_obj_t *obj, char *buffer, int size)
++{
++ return yaffs_do_xattrib_fetch(obj, NULL, buffer,size);
++}
++
++
++
+ #if 0
+-int yaffs_DumpObject(yaffs_Object *obj)
++int yaffs_dump_obj(yaffs_obj_t *obj)
+ {
+ YCHAR name[257];
+
+- yaffs_GetObjectName(obj, name, 256);
++ yaffs_get_obj_name(obj, name, YAFFS_MAX_NAME_LENGTH + 1);
+
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("Object %d, inode %d \"%s\"\n dirty %d valid %d serial %d sum %d"
+ " chunk %d type %d size %d\n"
+- TENDSTR), obj->objectId, yaffs_GetObjectInode(obj), name,
+- obj->dirty, obj->valid, obj->serial, obj->sum, obj->hdrChunk,
+- yaffs_GetObjectType(obj), yaffs_GetObjectFileLength(obj)));
++ TENDSTR), obj->obj_id, yaffs_get_obj_inode(obj), name,
++ obj->dirty, obj->valid, obj->serial, obj->sum, obj->hdr_chunk,
++ yaffs_get_obj_type(obj), yaffs_get_obj_length(obj)));
+
+ return YAFFS_OK;
+ }
+@@ -7046,72 +5052,74 @@ int yaffs_DumpObject(yaffs_Object *obj)
+
+ /*---------------------------- Initialisation code -------------------------------------- */
+
+-static int yaffs_CheckDevFunctions(const yaffs_Device *dev)
++static int yaffs_cehck_dev_fns(const yaffs_dev_t *dev)
+ {
+
+ /* Common functions, gotta have */
+- if (!dev->eraseBlockInNAND || !dev->initialiseNAND)
++ if (!dev->param.erase_fn || !dev->param.initialise_flash_fn)
+ return 0;
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+
+ /* Can use the "with tags" style interface for yaffs1 or yaffs2 */
+- if (dev->writeChunkWithTagsToNAND &&
+- dev->readChunkWithTagsFromNAND &&
+- !dev->writeChunkToNAND &&
+- !dev->readChunkFromNAND &&
+- dev->markNANDBlockBad && dev->queryNANDBlock)
++ if (dev->param.write_chunk_tags_fn &&
++ dev->param.read_chunk_tags_fn &&
++ !dev->param.write_chunk_fn &&
++ !dev->param.read_chunk_fn &&
++ dev->param.bad_block_fn &&
++ dev->param.query_block_fn)
+ return 1;
+ #endif
+
+ /* Can use the "spare" style interface for yaffs1 */
+- if (!dev->isYaffs2 &&
+- !dev->writeChunkWithTagsToNAND &&
+- !dev->readChunkWithTagsFromNAND &&
+- dev->writeChunkToNAND &&
+- dev->readChunkFromNAND &&
+- !dev->markNANDBlockBad && !dev->queryNANDBlock)
++ if (!dev->param.is_yaffs2 &&
++ !dev->param.write_chunk_tags_fn &&
++ !dev->param.read_chunk_tags_fn &&
++ dev->param.write_chunk_fn &&
++ dev->param.read_chunk_fn &&
++ !dev->param.bad_block_fn &&
++ !dev->param.query_block_fn)
+ return 1;
+
+- return 0; /* bad */
++ return 0; /* bad */
+ }
+
+
+-static int yaffs_CreateInitialDirectories(yaffs_Device *dev)
++static int yaffs_create_initial_dir(yaffs_dev_t *dev)
+ {
+ /* Initialise the unlinked, deleted, root and lost and found directories */
+
+- dev->lostNFoundDir = dev->rootDir = NULL;
+- dev->unlinkedDir = dev->deletedDir = NULL;
++ dev->lost_n_found = dev->root_dir = NULL;
++ dev->unlinked_dir = dev->del_dir = NULL;
+
+- dev->unlinkedDir =
+- yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
++ dev->unlinked_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+
+- dev->deletedDir =
+- yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
++ dev->del_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+
+- dev->rootDir =
+- yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_ROOT,
++ dev->root_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
+ YAFFS_ROOT_MODE | S_IFDIR);
+- dev->lostNFoundDir =
+- yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_LOSTNFOUND,
++ dev->lost_n_found =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
+ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+- if (dev->lostNFoundDir && dev->rootDir && dev->unlinkedDir && dev->deletedDir) {
+- yaffs_AddObjectToDirectory(dev->rootDir, dev->lostNFoundDir);
++ if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir && dev->del_dir) {
++ yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+ }
+
+-int yaffs_GutsInitialise(yaffs_Device *dev)
++int yaffs_guts_initialise(yaffs_dev_t *dev)
+ {
+ int init_failed = 0;
+ unsigned x;
+ int bits;
+
+- T(YAFFS_TRACE_TRACING, (TSTR("yaffs: yaffs_GutsInitialise()" TENDSTR)));
++ T(YAFFS_TRACE_TRACING, (TSTR("yaffs: yaffs_guts_initialise()" TENDSTR)));
+
+ /* Check stuff that must be set */
+
+@@ -7120,52 +5128,52 @@ int yaffs_GutsInitialise(yaffs_Device *d
+ return YAFFS_FAIL;
+ }
+
+- dev->internalStartBlock = dev->startBlock;
+- dev->internalEndBlock = dev->endBlock;
+- dev->blockOffset = 0;
+- dev->chunkOffset = 0;
+- dev->nFreeChunks = 0;
+-
+- dev->gcBlock = -1;
+-
+- if (dev->startBlock == 0) {
+- dev->internalStartBlock = dev->startBlock + 1;
+- dev->internalEndBlock = dev->endBlock + 1;
+- dev->blockOffset = 1;
+- dev->chunkOffset = dev->nChunksPerBlock;
++ dev->internal_start_block = dev->param.start_block;
++ dev->internal_end_block = dev->param.end_block;
++ dev->block_offset = 0;
++ dev->chunk_offset = 0;
++ dev->n_free_chunks = 0;
++
++ dev->gc_block = 0;
++
++ if (dev->param.start_block == 0) {
++ dev->internal_start_block = dev->param.start_block + 1;
++ dev->internal_end_block = dev->param.end_block + 1;
++ dev->block_offset = 1;
++ dev->chunk_offset = dev->param.chunks_per_block;
+ }
+
+ /* Check geometry parameters. */
+
+- if ((!dev->inbandTags && dev->isYaffs2 && dev->totalBytesPerChunk < 1024) ||
+- (!dev->isYaffs2 && dev->totalBytesPerChunk < 512) ||
+- (dev->inbandTags && !dev->isYaffs2) ||
+- dev->nChunksPerBlock < 2 ||
+- dev->nReservedBlocks < 2 ||
+- dev->internalStartBlock <= 0 ||
+- dev->internalEndBlock <= 0 ||
+- dev->internalEndBlock <= (dev->internalStartBlock + dev->nReservedBlocks + 2)) { /* otherwise it is too small */
++ if ((!dev->param.inband_tags && dev->param.is_yaffs2 && dev->param.total_bytes_per_chunk < 1024) ||
++ (!dev->param.is_yaffs2 && dev->param.total_bytes_per_chunk < 512) ||
++ (dev->param.inband_tags && !dev->param.is_yaffs2) ||
++ dev->param.chunks_per_block < 2 ||
++ dev->param.n_reserved_blocks < 2 ||
++ dev->internal_start_block <= 0 ||
++ dev->internal_end_block <= 0 ||
++ dev->internal_end_block <= (dev->internal_start_block + dev->param.n_reserved_blocks + 2)) { /* otherwise it is too small */
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s, inbandTags %d "
+- TENDSTR), dev->totalBytesPerChunk, dev->isYaffs2 ? "2" : "", dev->inbandTags));
++ ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d "
++ TENDSTR), dev->param.total_bytes_per_chunk, dev->param.is_yaffs2 ? "2" : "", dev->param.inband_tags));
+ return YAFFS_FAIL;
+ }
+
+- if (yaffs_InitialiseNAND(dev) != YAFFS_OK) {
++ if (yaffs_init_nand(dev) != YAFFS_OK) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: InitialiseNAND failed" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+ /* Sort out space for inband tags, if required */
+- if (dev->inbandTags)
+- dev->nDataBytesPerChunk = dev->totalBytesPerChunk - sizeof(yaffs_PackedTags2TagsPart);
++ if (dev->param.inband_tags)
++ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk - sizeof(yaffs_PackedTags2TagsPart);
+ else
+- dev->nDataBytesPerChunk = dev->totalBytesPerChunk;
++ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
+
+ /* Got the right mix of functions? */
+- if (!yaffs_CheckDevFunctions(dev)) {
++ if (!yaffs_cehck_dev_fns(dev)) {
+ /* Function missing */
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+@@ -7175,13 +5183,13 @@ int yaffs_GutsInitialise(yaffs_Device *d
+ }
+
+ /* This is really a compilation check. */
+- if (!yaffs_CheckStructures()) {
++ if (!yaffs_check_structures()) {
+ T(YAFFS_TRACE_ALWAYS,
+- (TSTR("yaffs_CheckStructures failed\n" TENDSTR)));
++ (TSTR("yaffs_check_structures failed\n" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+- if (dev->isMounted) {
++ if (dev->is_mounted) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: device already mounted\n" TENDSTR)));
+ return YAFFS_FAIL;
+@@ -7189,59 +5197,62 @@ int yaffs_GutsInitialise(yaffs_Device *d
+
+ /* Finished with most checks. One or two more checks happen later on too. */
+
+- dev->isMounted = 1;
++ dev->is_mounted = 1;
+
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+- x = dev->nDataBytesPerChunk;
+- /* We always use dev->chunkShift and dev->chunkDiv */
+- dev->chunkShift = Shifts(x);
+- x >>= dev->chunkShift;
+- dev->chunkDiv = x;
+- /* We only use chunk mask if chunkDiv is 1 */
+- dev->chunkMask = (1<<dev->chunkShift) - 1;
++ x = dev->data_bytes_per_chunk;
++ /* We always use dev->chunk_shift and dev->chunk_div */
++ dev->chunk_shift = Shifts(x);
++ x >>= dev->chunk_shift;
++ dev->chunk_div = x;
++ /* We only use chunk mask if chunk_div is 1 */
++ dev->chunk_mask = (1<<dev->chunk_shift) - 1;
+
+ /*
+- * Calculate chunkGroupBits.
+- * We need to find the next power of 2 > than internalEndBlock
++ * Calculate chunk_grp_bits.
++ * We need to find the next power of 2 > than internal_end_block
+ */
+
+- x = dev->nChunksPerBlock * (dev->internalEndBlock + 1);
++ x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
+
+ bits = ShiftsGE(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+- if (!dev->wideTnodesDisabled) {
++ if (!dev->param.wide_tnodes_disabled) {
+ /* bits must be even so that we end up with 32-bit words */
+ if (bits & 1)
+ bits++;
+ if (bits < 16)
+- dev->tnodeWidth = 16;
++ dev->tnode_width = 16;
+ else
+- dev->tnodeWidth = bits;
++ dev->tnode_width = bits;
+ } else
+- dev->tnodeWidth = 16;
++ dev->tnode_width = 16;
+
+- dev->tnodeMask = (1<<dev->tnodeWidth)-1;
++ dev->tnode_mask = (1<<dev->tnode_width)-1;
+
+ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
+ * so if the bitwidth of the
+ * chunk range we're using is greater than 16 we need
+- * to figure out chunk shift and chunkGroupSize
++ * to figure out chunk shift and chunk_grp_size
+ */
+
+- if (bits <= dev->tnodeWidth)
+- dev->chunkGroupBits = 0;
++ if (bits <= dev->tnode_width)
++ dev->chunk_grp_bits = 0;
+ else
+- dev->chunkGroupBits = bits - dev->tnodeWidth;
++ dev->chunk_grp_bits = bits - dev->tnode_width;
+
++ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0)/8;
++ if(dev->tnode_size < sizeof(yaffs_tnode_t))
++ dev->tnode_size = sizeof(yaffs_tnode_t);
+
+- dev->chunkGroupSize = 1 << dev->chunkGroupBits;
++ dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
+
+- if (dev->nChunksPerBlock < dev->chunkGroupSize) {
++ if (dev->param.chunks_per_block < dev->chunk_grp_size) {
+ /* We have a problem because the soft delete won't work if
+ * the chunk group size > chunks per block.
+ * This can be remedied by using larger "virtual blocks".
+@@ -7255,85 +5266,89 @@ int yaffs_GutsInitialise(yaffs_Device *d
+ /* OK, we've finished verifying the device, lets continue with initialisation */
+
+ /* More device initialisation */
+- dev->garbageCollections = 0;
+- dev->passiveGarbageCollections = 0;
+- dev->currentDirtyChecker = 0;
+- dev->bufferedBlock = -1;
+- dev->doingBufferedBlockRewrite = 0;
+- dev->nDeletedFiles = 0;
+- dev->nBackgroundDeletions = 0;
+- dev->nUnlinkedFiles = 0;
+- dev->eccFixed = 0;
+- dev->eccUnfixed = 0;
+- dev->tagsEccFixed = 0;
+- dev->tagsEccUnfixed = 0;
+- dev->nErasureFailures = 0;
+- dev->nErasedBlocks = 0;
+- dev->isDoingGC = 0;
+- dev->hasPendingPrioritisedGCs = 1; /* Assume the worst for now, will get fixed on first GC */
++ dev->all_gcs = 0;
++ dev->passive_gc_count = 0;
++ dev->oldest_dirty_gc_count = 0;
++ dev->bg_gcs = 0;
++ dev->gc_block_finder = 0;
++ dev->buffered_block = -1;
++ dev->doing_buffered_block_rewrite = 0;
++ dev->n_deleted_files = 0;
++ dev->n_bg_deletions = 0;
++ dev->n_unlinked_files = 0;
++ dev->n_ecc_fixed = 0;
++ dev->n_ecc_unfixed = 0;
++ dev->n_tags_ecc_fixed = 0;
++ dev->n_tags_ecc_unfixed = 0;
++ dev->n_erase_failures = 0;
++ dev->n_erased_blocks = 0;
++ dev->gc_disable= 0;
++ dev->has_pending_prioritised_gc = 1; /* Assume the worst for now, will get fixed on first GC */
++ YINIT_LIST_HEAD(&dev->dirty_dirs);
++ dev->oldest_dirty_seq = 0;
++ dev->oldest_dirty_block = 0;
+
+ /* Initialise temporary buffers and caches. */
+- if (!yaffs_InitialiseTempBuffers(dev))
++ if (!yaffs_init_tmp_buffers(dev))
+ init_failed = 1;
+
+- dev->srCache = NULL;
+- dev->gcCleanupList = NULL;
++ dev->cache = NULL;
++ dev->gc_cleanup_list = NULL;
+
+
+ if (!init_failed &&
+- dev->nShortOpCaches > 0) {
++ dev->param.n_caches > 0) {
+ int i;
+ void *buf;
+- int srCacheBytes = dev->nShortOpCaches * sizeof(yaffs_ChunkCache);
++ int cacheBytes = dev->param.n_caches * sizeof(yaffs_cache_t);
+
+- if (dev->nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES)
+- dev->nShortOpCaches = YAFFS_MAX_SHORT_OP_CACHES;
++ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
++ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
+
+- dev->srCache = YMALLOC(srCacheBytes);
++ dev->cache = YMALLOC(cacheBytes);
+
+- buf = (__u8 *) dev->srCache;
++ buf = (__u8 *) dev->cache;
+
+- if (dev->srCache)
+- memset(dev->srCache, 0, srCacheBytes);
++ if (dev->cache)
++ memset(dev->cache, 0, cacheBytes);
+
+- for (i = 0; i < dev->nShortOpCaches && buf; i++) {
+- dev->srCache[i].object = NULL;
+- dev->srCache[i].lastUse = 0;
+- dev->srCache[i].dirty = 0;
+- dev->srCache[i].data = buf = YMALLOC_DMA(dev->totalBytesPerChunk);
++ for (i = 0; i < dev->param.n_caches && buf; i++) {
++ dev->cache[i].object = NULL;
++ dev->cache[i].last_use = 0;
++ dev->cache[i].dirty = 0;
++ dev->cache[i].data = buf = YMALLOC_DMA(dev->param.total_bytes_per_chunk);
+ }
+ if (!buf)
+ init_failed = 1;
+
+- dev->srLastUse = 0;
++ dev->cache_last_use = 0;
+ }
+
+- dev->cacheHits = 0;
++ dev->cache_hits = 0;
+
+ if (!init_failed) {
+- dev->gcCleanupList = YMALLOC(dev->nChunksPerBlock * sizeof(__u32));
+- if (!dev->gcCleanupList)
++ dev->gc_cleanup_list = YMALLOC(dev->param.chunks_per_block * sizeof(__u32));
++ if (!dev->gc_cleanup_list)
+ init_failed = 1;
+ }
+
+- if (dev->isYaffs2)
+- dev->useHeaderFileSize = 1;
++ if (dev->param.is_yaffs2)
++ dev->param.use_header_file_size = 1;
+
+- if (!init_failed && !yaffs_InitialiseBlocks(dev))
++ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+- yaffs_InitialiseTnodes(dev);
+- yaffs_InitialiseObjects(dev);
++ yaffs_init_tnodes_and_objs(dev);
+
+- if (!init_failed && !yaffs_CreateInitialDirectories(dev))
++ if (!init_failed && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+
+ if (!init_failed) {
+ /* Now scan the flash. */
+- if (dev->isYaffs2) {
+- if (yaffs_CheckpointRestore(dev)) {
+- yaffs_CheckObjectDetailsLoaded(dev->rootDir);
++ if (dev->param.is_yaffs2) {
++ if (yaffs2_checkpt_restore(dev)) {
++ yaffs_check_obj_details_loaded(dev->root_dir);
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: restored from checkpoint" TENDSTR)));
+ } else {
+@@ -7341,128 +5356,129 @@ int yaffs_GutsInitialise(yaffs_Device *d
+ /* Clean up the mess caused by an aborted checkpoint load
+ * and scan backwards.
+ */
+- yaffs_DeinitialiseBlocks(dev);
+- yaffs_DeinitialiseTnodes(dev);
+- yaffs_DeinitialiseObjects(dev);
++ yaffs_deinit_blocks(dev);
+
++ yaffs_deinit_tnodes_and_objs(dev);
+
+- dev->nErasedBlocks = 0;
+- dev->nFreeChunks = 0;
+- dev->allocationBlock = -1;
+- dev->allocationPage = -1;
+- dev->nDeletedFiles = 0;
+- dev->nUnlinkedFiles = 0;
+- dev->nBackgroundDeletions = 0;
+- dev->oldestDirtySequence = 0;
++ dev->n_erased_blocks = 0;
++ dev->n_free_chunks = 0;
++ dev->alloc_block = -1;
++ dev->alloc_page = -1;
++ dev->n_deleted_files = 0;
++ dev->n_unlinked_files = 0;
++ dev->n_bg_deletions = 0;
+
+- if (!init_failed && !yaffs_InitialiseBlocks(dev))
++ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+- yaffs_InitialiseTnodes(dev);
+- yaffs_InitialiseObjects(dev);
++ yaffs_init_tnodes_and_objs(dev);
+
+- if (!init_failed && !yaffs_CreateInitialDirectories(dev))
++ if (!init_failed && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+- if (!init_failed && !yaffs_ScanBackwards(dev))
++ if (!init_failed && !yaffs2_scan_backwards(dev))
+ init_failed = 1;
+ }
+- } else if (!yaffs_Scan(dev))
++ } else if (!yaffs1_scan(dev))
+ init_failed = 1;
+
+- yaffs_StripDeletedObjects(dev);
++ yaffs_strip_deleted_objs(dev);
++ yaffs_fix_hanging_objs(dev);
++ if(dev->param.empty_lost_n_found)
++ yaffs_empty_l_n_f(dev);
+ }
+
+ if (init_failed) {
+ /* Clean up the mess */
+ T(YAFFS_TRACE_TRACING,
+- (TSTR("yaffs: yaffs_GutsInitialise() aborted.\n" TENDSTR)));
++ (TSTR("yaffs: yaffs_guts_initialise() aborted.\n" TENDSTR)));
+
+- yaffs_Deinitialise(dev);
++ yaffs_deinitialise(dev);
+ return YAFFS_FAIL;
+ }
+
+ /* Zero out stats */
+- dev->nPageReads = 0;
+- dev->nPageWrites = 0;
+- dev->nBlockErasures = 0;
+- dev->nGCCopies = 0;
+- dev->nRetriedWrites = 0;
+-
+- dev->nRetiredBlocks = 0;
+-
+- yaffs_VerifyFreeChunks(dev);
+- yaffs_VerifyBlocks(dev);
+-
++ dev->n_page_reads = 0;
++ dev->n_page_writes = 0;
++ dev->n_erasures = 0;
++ dev->n_gc_copies = 0;
++ dev->n_retired_writes = 0;
++
++ dev->n_retired_blocks = 0;
++
++ yaffs_verify_free_chunks(dev);
++ yaffs_verify_blocks(dev);
++
++ /* Clean up any aborted checkpoint data */
++ if(!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
++ yaffs2_checkpt_invalidate(dev);
+
+ T(YAFFS_TRACE_TRACING,
+- (TSTR("yaffs: yaffs_GutsInitialise() done.\n" TENDSTR)));
++ (TSTR("yaffs: yaffs_guts_initialise() done.\n" TENDSTR)));
+ return YAFFS_OK;
+
+ }
+
+-void yaffs_Deinitialise(yaffs_Device *dev)
++void yaffs_deinitialise(yaffs_dev_t *dev)
+ {
+- if (dev->isMounted) {
++ if (dev->is_mounted) {
+ int i;
+
+- yaffs_DeinitialiseBlocks(dev);
+- yaffs_DeinitialiseTnodes(dev);
+- yaffs_DeinitialiseObjects(dev);
+- if (dev->nShortOpCaches > 0 &&
+- dev->srCache) {
++ yaffs_deinit_blocks(dev);
++ yaffs_deinit_tnodes_and_objs(dev);
++ if (dev->param.n_caches > 0 &&
++ dev->cache) {
+
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].data)
+- YFREE(dev->srCache[i].data);
+- dev->srCache[i].data = NULL;
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].data)
++ YFREE(dev->cache[i].data);
++ dev->cache[i].data = NULL;
+ }
+
+- YFREE(dev->srCache);
+- dev->srCache = NULL;
++ YFREE(dev->cache);
++ dev->cache = NULL;
+ }
+
+- YFREE(dev->gcCleanupList);
++ YFREE(dev->gc_cleanup_list);
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+- YFREE(dev->tempBuffer[i].buffer);
++ YFREE(dev->temp_buffer[i].buffer);
+
+- dev->isMounted = 0;
++ dev->is_mounted = 0;
+
+- if (dev->deinitialiseNAND)
+- dev->deinitialiseNAND(dev);
++ if (dev->param.deinitialise_flash_fn)
++ dev->param.deinitialise_flash_fn(dev);
+ }
+ }
+
+-static int yaffs_CountFreeChunks(yaffs_Device *dev)
++int yaffs_count_free_chunks(yaffs_dev_t *dev)
+ {
+- int nFree;
++ int nFree=0;
+ int b;
+
+- yaffs_BlockInfo *blk;
+-
+- for (nFree = 0, b = dev->internalStartBlock; b <= dev->internalEndBlock;
+- b++) {
+- blk = yaffs_GetBlockInfo(dev, b);
++ yaffs_block_info_t *blk;
+
+- switch (blk->blockState) {
++ blk = dev->block_info;
++ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
++ switch (blk->block_state) {
+ case YAFFS_BLOCK_STATE_EMPTY:
+ case YAFFS_BLOCK_STATE_ALLOCATING:
+ case YAFFS_BLOCK_STATE_COLLECTING:
+ case YAFFS_BLOCK_STATE_FULL:
+ nFree +=
+- (dev->nChunksPerBlock - blk->pagesInUse +
+- blk->softDeletions);
++ (dev->param.chunks_per_block - blk->pages_in_use +
++ blk->soft_del_pages);
+ break;
+ default:
+ break;
+ }
++ blk++;
+ }
+
+ return nFree;
+ }
+
+-int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev)
++int yaffs_get_n_free_chunks(yaffs_dev_t *dev)
+ {
+ /* This is what we report to the outside world */
+
+@@ -7472,30 +5488,28 @@ int yaffs_GetNumberOfFreeChunks(yaffs_De
+ int i;
+
+ #if 1
+- nFree = dev->nFreeChunks;
++ nFree = dev->n_free_chunks;
+ #else
+- nFree = yaffs_CountFreeChunks(dev);
++ nFree = yaffs_count_free_chunks(dev);
+ #endif
+
+- nFree += dev->nDeletedFiles;
++ nFree += dev->n_deleted_files;
+
+ /* Now count the number of dirty chunks in the cache and subtract those */
+
+- for (nDirtyCacheChunks = 0, i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].dirty)
++ for (nDirtyCacheChunks = 0, i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].dirty)
+ nDirtyCacheChunks++;
+ }
+
+ nFree -= nDirtyCacheChunks;
+
+- nFree -= ((dev->nReservedBlocks + 1) * dev->nChunksPerBlock);
++ nFree -= ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
+
+ /* Now we figure out how much to reserve for the checkpoint and report that... */
+- blocksForCheckpoint = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint;
+- if (blocksForCheckpoint < 0)
+- blocksForCheckpoint = 0;
++ blocksForCheckpoint = yaffs_calc_checkpt_blocks_required(dev);
+
+- nFree -= (blocksForCheckpoint * dev->nChunksPerBlock);
++ nFree -= (blocksForCheckpoint * dev->param.chunks_per_block);
+
+ if (nFree < 0)
+ nFree = 0;
+@@ -7504,49 +5518,27 @@ int yaffs_GetNumberOfFreeChunks(yaffs_De
+
+ }
+
+-static int yaffs_freeVerificationFailures;
+-
+-static void yaffs_VerifyFreeChunks(yaffs_Device *dev)
+-{
+- int counted;
+- int difference;
+-
+- if (yaffs_SkipVerification(dev))
+- return;
+-
+- counted = yaffs_CountFreeChunks(dev);
+-
+- difference = dev->nFreeChunks - counted;
+-
+- if (difference) {
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("Freechunks verification failure %d %d %d" TENDSTR),
+- dev->nFreeChunks, counted, difference));
+- yaffs_freeVerificationFailures++;
+- }
+-}
+
+ /*---------------------------------------- YAFFS test code ----------------------*/
+
+-#define yaffs_CheckStruct(structure, syze, name) \
++#define yaffs_check_struct(structure, syze, name) \
+ do { \
+ if (sizeof(structure) != syze) { \
+ T(YAFFS_TRACE_ALWAYS, (TSTR("%s should be %d but is %d\n" TENDSTR),\
+- name, syze, sizeof(structure))); \
++ name, syze, (int) sizeof(structure))); \
+ return YAFFS_FAIL; \
+ } \
+ } while (0)
+
+-static int yaffs_CheckStructures(void)
++static int yaffs_check_structures(void)
+ {
+-/* yaffs_CheckStruct(yaffs_Tags,8,"yaffs_Tags"); */
+-/* yaffs_CheckStruct(yaffs_TagsUnion,8,"yaffs_TagsUnion"); */
+-/* yaffs_CheckStruct(yaffs_Spare,16,"yaffs_Spare"); */
+-#ifndef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode");
+-#endif
++/* yaffs_check_struct(yaffs_tags_t,8,"yaffs_tags_t"); */
++/* yaffs_check_struct(yaffs_tags_union_t,8,"yaffs_tags_union_t"); */
++/* yaffs_check_struct(yaffs_spare,16,"yaffs_spare"); */
++/* yaffs_check_struct(yaffs_tnode_t, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_tnode_t"); */
++
+ #ifndef CONFIG_YAFFS_WINCE
+- yaffs_CheckStruct(yaffs_ObjectHeader, 512, "yaffs_ObjectHeader");
++ yaffs_check_struct(yaffs_obj_header, 512, "yaffs_obj_header");
+ #endif
+ return YAFFS_OK;
+ }
+--- a/fs/yaffs2/yaffs_guts.h
++++ b/fs/yaffs2/yaffs_guts.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -16,8 +16,9 @@
+ #ifndef __YAFFS_GUTS_H__
+ #define __YAFFS_GUTS_H__
+
+-#include "devextras.h"
+ #include "yportenv.h"
++#include "devextras.h"
++#include "yaffs_list.h"
+
+ #define YAFFS_OK 1
+ #define YAFFS_FAIL 0
+@@ -52,7 +53,6 @@
+
+ #define YAFFS_MAX_CHUNK_ID 0x000FFFFF
+
+-#define YAFFS_UNUSED_OBJECT_ID 0x0003FFFF
+
+ #define YAFFS_ALLOCATION_NOBJECTS 100
+ #define YAFFS_ALLOCATION_NTNODES 100
+@@ -62,8 +62,9 @@
+
+
+ #define YAFFS_OBJECT_SPACE 0x40000
++#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE -1)
+
+-#define YAFFS_CHECKPOINT_VERSION 3
++#define YAFFS_CHECKPOINT_VERSION 4
+
+ #ifdef CONFIG_YAFFS_UNICODE
+ #define YAFFS_MAX_NAME_LENGTH 127
+@@ -81,12 +82,11 @@
+ #define YAFFS_OBJECTID_UNLINKED 3
+ #define YAFFS_OBJECTID_DELETED 4
+
+-/* Sseudo object ids for checkpointing */
++/* Pseudo object ids for checkpointing */
+ #define YAFFS_OBJECTID_SB_HEADER 0x10
+ #define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
+ #define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
+
+-/* */
+
+ #define YAFFS_MAX_SHORT_OP_CACHES 20
+
+@@ -113,18 +113,14 @@
+
+ /* ChunkCache is used for short read/write operations.*/
+ typedef struct {
+- struct yaffs_ObjectStruct *object;
+- int chunkId;
+- int lastUse;
++ struct yaffs_obj_s *object;
++ int chunk_id;
++ int last_use;
+ int dirty;
+- int nBytes; /* Only valid if the cache is dirty */
++ int n_bytes; /* Only valid if the cache is dirty */
+ int locked; /* Can't push out or flush while locked. */
+-#ifdef CONFIG_YAFFS_YAFFS2
+ __u8 *data;
+-#else
+- __u8 data[YAFFS_BYTES_PER_CHUNK];
+-#endif
+-} yaffs_ChunkCache;
++} yaffs_cache_t;
+
+
+
+@@ -135,18 +131,18 @@ typedef struct {
+
+ #ifndef CONFIG_YAFFS_NO_YAFFS1
+ typedef struct {
+- unsigned chunkId:20;
+- unsigned serialNumber:2;
+- unsigned byteCountLSB:10;
+- unsigned objectId:18;
++ unsigned chunk_id:20;
++ unsigned serial_number:2;
++ unsigned n_bytes_lsb:10;
++ unsigned obj_id:18;
+ unsigned ecc:12;
+- unsigned byteCountMSB:2;
+-} yaffs_Tags;
++ unsigned n_bytes_msb:2;
++} yaffs_tags_t;
+
+ typedef union {
+- yaffs_Tags asTags;
+- __u8 asBytes[8];
+-} yaffs_TagsUnion;
++ yaffs_tags_t as_tags;
++ __u8 as_bytes[8];
++} yaffs_tags_union_t;
+
+ #endif
+
+@@ -157,7 +153,7 @@ typedef enum {
+ YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_ECC_RESULT_FIXED,
+ YAFFS_ECC_RESULT_UNFIXED
+-} yaffs_ECCResult;
++} yaffs_ecc_result;
+
+ typedef enum {
+ YAFFS_OBJECT_TYPE_UNKNOWN,
+@@ -166,64 +162,64 @@ typedef enum {
+ YAFFS_OBJECT_TYPE_DIRECTORY,
+ YAFFS_OBJECT_TYPE_HARDLINK,
+ YAFFS_OBJECT_TYPE_SPECIAL
+-} yaffs_ObjectType;
++} yaffs_obj_type;
+
+ #define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+ typedef struct {
+
+- unsigned validMarker0;
+- unsigned chunkUsed; /* Status of the chunk: used or unused */
+- unsigned objectId; /* If 0 then this is not part of an object (unused) */
+- unsigned chunkId; /* If 0 then this is a header, else a data chunk */
+- unsigned byteCount; /* Only valid for data chunks */
++ unsigned validity1;
++ unsigned chunk_used; /* Status of the chunk: used or unused */
++ unsigned obj_id; /* If 0 then this is not part of an object (unused) */
++ unsigned chunk_id; /* If 0 then this is a header, else a data chunk */
++ unsigned n_bytes; /* Only valid for data chunks */
+
+ /* The following stuff only has meaning when we read */
+- yaffs_ECCResult eccResult;
+- unsigned blockBad;
++ yaffs_ecc_result ecc_result;
++ unsigned block_bad;
+
+ /* YAFFS 1 stuff */
+- unsigned chunkDeleted; /* The chunk is marked deleted */
+- unsigned serialNumber; /* Yaffs1 2-bit serial number */
++ unsigned is_deleted; /* The chunk is marked deleted */
++ unsigned serial_number; /* Yaffs1 2-bit serial number */
+
+ /* YAFFS2 stuff */
+- unsigned sequenceNumber; /* The sequence number of this block */
++ unsigned seq_number; /* The sequence number of this block */
+
+ /* Extra info if this is an object header (YAFFS2 only) */
+
+- unsigned extraHeaderInfoAvailable; /* There is extra info available if this is not zero */
+- unsigned extraParentObjectId; /* The parent object */
+- unsigned extraIsShrinkHeader; /* Is it a shrink header? */
+- unsigned extraShadows; /* Does this shadow another object? */
++ unsigned extra_available; /* There is extra info available if this is not zero */
++ unsigned extra_parent_id; /* The parent object */
++ unsigned extra_is_shrink; /* Is it a shrink header? */
++ unsigned extra_shadows; /* Does this shadow another object? */
+
+- yaffs_ObjectType extraObjectType; /* What object type? */
++ yaffs_obj_type extra_obj_type; /* What object type? */
+
+- unsigned extraFileLength; /* Length if it is a file */
+- unsigned extraEquivalentObjectId; /* Equivalent object Id if it is a hard link */
++ unsigned extra_length; /* Length if it is a file */
++ unsigned extra_equiv_id; /* Equivalent object Id if it is a hard link */
+
+- unsigned validMarker1;
++ unsigned validty1;
+
+-} yaffs_ExtendedTags;
++} yaffs_ext_tags;
+
+ /* Spare structure for YAFFS1 */
+ typedef struct {
+- __u8 tagByte0;
+- __u8 tagByte1;
+- __u8 tagByte2;
+- __u8 tagByte3;
+- __u8 pageStatus; /* set to 0 to delete the chunk */
+- __u8 blockStatus;
+- __u8 tagByte4;
+- __u8 tagByte5;
++ __u8 tb0;
++ __u8 tb1;
++ __u8 tb2;
++ __u8 tb3;
++ __u8 page_status; /* set to 0 to delete the chunk */
++ __u8 block_status;
++ __u8 tb4;
++ __u8 tb5;
+ __u8 ecc1[3];
+- __u8 tagByte6;
+- __u8 tagByte7;
++ __u8 tb6;
++ __u8 tb7;
+ __u8 ecc2[3];
+-} yaffs_Spare;
++} yaffs_spare;
+
+ /*Special structure for passing through to mtd */
+-struct yaffs_NANDSpare {
+- yaffs_Spare spare;
++struct yaffs_nand_spare {
++ yaffs_spare spare;
+ int eccres1;
+ int eccres2;
+ };
+@@ -234,6 +230,8 @@ typedef enum {
+ YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+ YAFFS_BLOCK_STATE_SCANNING,
++ /* Being scanned */
++
+ YAFFS_BLOCK_STATE_NEEDS_SCANNING,
+ /* The block might have something on it (ie it is allocating or full, perhaps empty)
+ * but it needs to be scanned to determine its true state.
+@@ -249,67 +247,69 @@ typedef enum {
+ /* This block is partially allocated.
+ * At least one page holds valid data.
+ * This is the one currently being used for page
+- * allocation. Should never be more than one of these
++ * allocation. Should never be more than one of these.
++ * If a block is only partially allocated at mount it is treated as full.
+ */
+
+ YAFFS_BLOCK_STATE_FULL,
+ /* All the pages in this block have been allocated.
++ * If a block was only partially allocated when mounted we treat
++ * it as fully allocated.
+ */
+
+ YAFFS_BLOCK_STATE_DIRTY,
+- /* All pages have been allocated and deleted.
++ /* The block was full and now all chunks have been deleted.
+ * Erase me, reuse me.
+ */
+
+ YAFFS_BLOCK_STATE_CHECKPOINT,
+- /* This block is assigned to holding checkpoint data.
+- */
++ /* This block is assigned to holding checkpoint data. */
+
+ YAFFS_BLOCK_STATE_COLLECTING,
+ /* This block is being garbage collected */
+
+ YAFFS_BLOCK_STATE_DEAD
+ /* This block has failed and is not in use */
+-} yaffs_BlockState;
++} yaffs_block_state_t;
+
+ #define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+
+ typedef struct {
+
+- int softDeletions:10; /* number of soft deleted pages */
+- int pagesInUse:10; /* number of pages in use */
+- unsigned blockState:4; /* One of the above block states. NB use unsigned because enum is sometimes an int */
+- __u32 needsRetiring:1; /* Data has failed on this block, need to get valid data off */
++ int soft_del_pages:10; /* number of soft deleted pages */
++ int pages_in_use:10; /* number of pages in use */
++ unsigned block_state:4; /* One of the above block states. NB use unsigned because enum is sometimes an int */
++ __u32 needs_retiring:1; /* Data has failed on this block, need to get valid data off */
+ /* and retire the block. */
+- __u32 skipErasedCheck:1; /* If this is set we can skip the erased check on this block */
+- __u32 gcPrioritise:1; /* An ECC check or blank check has failed on this block.
++ __u32 skip_erased_check:1; /* If this is set we can skip the erased check on this block */
++ __u32 gc_prioritise:1; /* An ECC check or blank check has failed on this block.
+ It should be prioritised for GC */
+- __u32 chunkErrorStrikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
++ __u32 chunk_error_strikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+- __u32 hasShrinkHeader:1; /* This block has at least one shrink object header */
+- __u32 sequenceNumber; /* block sequence number for yaffs2 */
++ __u32 has_shrink_hdr:1; /* This block has at least one shrink object header */
++ __u32 seq_number; /* block sequence number for yaffs2 */
+ #endif
+
+-} yaffs_BlockInfo;
++} yaffs_block_info_t;
+
+ /* -------------------------- Object structure -------------------------------*/
+ /* This is the object structure as stored on NAND */
+
+ typedef struct {
+- yaffs_ObjectType type;
++ yaffs_obj_type type;
+
+ /* Apply to everything */
+- int parentObjectId;
+- __u16 sum__NoLongerUsed; /* checksum of name. No longer used */
++ int parent_obj_id;
++ __u16 sum_no_longer_used; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ /* The following apply to directories, files, symlinks - not hard links */
+ __u32 yst_mode; /* protection */
+
+ #ifdef CONFIG_YAFFS_WINCE
+- __u32 notForWinCE[5];
++ __u32 not_for_wince[5];
+ #else
+ __u32 yst_uid;
+ __u32 yst_gid;
+@@ -319,10 +319,10 @@ typedef struct {
+ #endif
+
+ /* File size applies to files only */
+- int fileSize;
++ int file_size;
+
+ /* Equivalent object id applies to hard links only. */
+- int equivalentObjectId;
++ int equiv_id;
+
+ /* Alias is for symlinks only. */
+ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+@@ -334,40 +334,29 @@ typedef struct {
+ __u32 win_atime[2];
+ __u32 win_mtime[2];
+ #else
+- __u32 roomToGrow[6];
++ __u32 room_to_grow[6];
+
+ #endif
+- __u32 inbandShadowsObject;
+- __u32 inbandIsShrink;
++ __u32 inband_shadowed_obj_id;
++ __u32 inband_is_shrink;
+
+- __u32 reservedSpace[2];
+- int shadowsObject; /* This object header shadows the specified object if > 0 */
++ __u32 reserved[2];
++ int shadows_obj; /* This object header shadows the specified object if > 0 */
+
+- /* isShrink applies to object headers written when we shrink the file (ie resize) */
+- __u32 isShrink;
++ /* is_shrink applies to object headers written when we shrink the file (ie resize) */
++ __u32 is_shrink;
+
+-} yaffs_ObjectHeader;
++} yaffs_obj_header;
+
+ /*--------------------------- Tnode -------------------------- */
+
+-union yaffs_Tnode_union {
+-#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL + 1];
+-#else
+- union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL];
+-#endif
+-/* __u16 level0[YAFFS_NTNODES_LEVEL0]; */
++union yaffs_tnode_union {
++ union yaffs_tnode_union *internal[YAFFS_NTNODES_INTERNAL];
+
+ };
+
+-typedef union yaffs_Tnode_union yaffs_Tnode;
++typedef union yaffs_tnode_union yaffs_tnode_t;
+
+-struct yaffs_TnodeList_struct {
+- struct yaffs_TnodeList_struct *next;
+- yaffs_Tnode *tnodes;
+-};
+-
+-typedef struct yaffs_TnodeList_struct yaffs_TnodeList;
+
+ /*------------------------ Object -----------------------------*/
+ /* An object can be one of:
+@@ -378,82 +367,85 @@ typedef struct yaffs_TnodeList_struct ya
+ */
+
+ typedef struct {
+- __u32 fileSize;
+- __u32 scannedFileSize;
+- __u32 shrinkSize;
+- int topLevel;
+- yaffs_Tnode *top;
+-} yaffs_FileStructure;
++ __u32 file_size;
++ __u32 scanned_size;
++ __u32 shrink_size;
++ int top_level;
++ yaffs_tnode_t *top;
++} yaffs_file_s;
+
+ typedef struct {
+ struct ylist_head children; /* list of child links */
+-} yaffs_DirectoryStructure;
++ struct ylist_head dirty; /* Entry for list of dirty directories */
++} yaffs_dir_s;
+
+ typedef struct {
+ YCHAR *alias;
+-} yaffs_SymLinkStructure;
++} yaffs_symlink_t;
+
+ typedef struct {
+- struct yaffs_ObjectStruct *equivalentObject;
+- __u32 equivalentObjectId;
+-} yaffs_HardLinkStructure;
++ struct yaffs_obj_s *equiv_obj;
++ __u32 equiv_id;
++} yaffs_hard_link_s;
+
+ typedef union {
+- yaffs_FileStructure fileVariant;
+- yaffs_DirectoryStructure directoryVariant;
+- yaffs_SymLinkStructure symLinkVariant;
+- yaffs_HardLinkStructure hardLinkVariant;
+-} yaffs_ObjectVariant;
++ yaffs_file_s file_variant;
++ yaffs_dir_s dir_variant;
++ yaffs_symlink_t symlink_variant;
++ yaffs_hard_link_s hardlink_variant;
++} yaffs_obj_variant;
++
++
+
+-struct yaffs_ObjectStruct {
++struct yaffs_obj_s {
+ __u8 deleted:1; /* This should only apply to unlinked files. */
+- __u8 softDeleted:1; /* it has also been soft deleted */
++ __u8 soft_del:1; /* it has also been soft deleted */
+ __u8 unlinked:1; /* An unlinked file. The file should be in the unlinked directory.*/
+ __u8 fake:1; /* A fake object has no presence on NAND. */
+- __u8 renameAllowed:1; /* Some objects are not allowed to be renamed. */
+- __u8 unlinkAllowed:1;
++ __u8 rename_allowed:1; /* Some objects are not allowed to be renamed. */
++ __u8 unlink_allowed:1;
+ __u8 dirty:1; /* the object needs to be written to flash */
+ __u8 valid:1; /* When the file system is being loaded up, this
+ * object might be created before the data
+ * is available (ie. file data records appear before the header).
+ */
+- __u8 lazyLoaded:1; /* This object has been lazy loaded and is missing some detail */
++ __u8 lazy_loaded:1; /* This object has been lazy loaded and is missing some detail */
+
+- __u8 deferedFree:1; /* For Linux kernel. Object is removed from NAND, but is
++ __u8 defered_free:1; /* For Linux kernel. Object is removed from NAND, but is
+ * still in the inode cache. Free of object is defered.
+ * until the inode is released.
+ */
+- __u8 beingCreated:1; /* This object is still being created so skip some checks. */
++ __u8 being_created:1; /* This object is still being created so skip some checks. */
++ __u8 is_shadowed:1; /* This object is shadowed on the way to being renamed. */
++
++ __u8 xattr_known:1; /* We know if this has object has xattribs or not. */
++ __u8 has_xattr:1; /* This object has xattribs. Valid if xattr_known. */
+
+ __u8 serial; /* serial number of chunk in NAND. Cached here */
+ __u16 sum; /* sum of the name to speed searching */
+
+- struct yaffs_DeviceStruct *myDev; /* The device I'm on */
++ struct yaffs_dev_s *my_dev; /* The device I'm on */
+
+- struct ylist_head hashLink; /* list of objects in this hash bucket */
++ struct ylist_head hash_link; /* list of objects in this hash bucket */
+
+- struct ylist_head hardLinks; /* all the equivalent hard linked objects */
++ struct ylist_head hard_links; /* all the equivalent hard linked objects */
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+- struct yaffs_ObjectStruct *parent;
++ struct yaffs_obj_s *parent;
+ struct ylist_head siblings;
+
+ /* Where's my object header in NAND? */
+- int hdrChunk;
++ int hdr_chunk;
+
+- int nDataChunks; /* Number of data chunks attached to the file. */
++ int n_data_chunks; /* Number of data chunks attached to the file. */
+
+- __u32 objectId; /* the object id value */
++ __u32 obj_id; /* the object id value */
+
+ __u32 yst_mode;
+
+ #ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+- YCHAR shortName[YAFFS_SHORT_NAME_LENGTH + 1];
+-#endif
+-
+-#ifndef __KERNEL__
+- __u32 inUse;
++ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
+ #endif
+
+ #ifdef CONFIG_YAFFS_WINCE
+@@ -470,53 +462,43 @@ struct yaffs_ObjectStruct {
+
+ __u32 yst_rdev;
+
+-#ifdef __KERNEL__
+- struct inode *myInode;
++ void *my_inode;
+
+-#endif
++ yaffs_obj_type variant_type;
+
+- yaffs_ObjectType variantType;
++ yaffs_obj_variant variant;
+
+- yaffs_ObjectVariant variant;
+-
+-};
+-
+-typedef struct yaffs_ObjectStruct yaffs_Object;
+-
+-struct yaffs_ObjectList_struct {
+- yaffs_Object *objects;
+- struct yaffs_ObjectList_struct *next;
+ };
+
+-typedef struct yaffs_ObjectList_struct yaffs_ObjectList;
++typedef struct yaffs_obj_s yaffs_obj_t;
+
+ typedef struct {
+ struct ylist_head list;
+ int count;
+-} yaffs_ObjectBucket;
++} yaffs_obj_bucket;
+
+
+-/* yaffs_CheckpointObject holds the definition of an object as dumped
++/* yaffs_checkpt_obj_t holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+ typedef struct {
+- int structType;
+- __u32 objectId;
+- __u32 parentId;
+- int hdrChunk;
+- yaffs_ObjectType variantType:3;
++ int struct_type;
++ __u32 obj_id;
++ __u32 parent_id;
++ int hdr_chunk;
++ yaffs_obj_type variant_type:3;
+ __u8 deleted:1;
+- __u8 softDeleted:1;
++ __u8 soft_del:1;
+ __u8 unlinked:1;
+ __u8 fake:1;
+- __u8 renameAllowed:1;
+- __u8 unlinkAllowed:1;
++ __u8 rename_allowed:1;
++ __u8 unlink_allowed:1;
+ __u8 serial;
+
+- int nDataChunks;
+- __u32 fileSizeOrEquivalentObjectId;
+-} yaffs_CheckpointObject;
++ int n_data_chunks;
++ __u32 size_or_equiv_obj;
++} yaffs_checkpt_obj_t;
+
+ /*--------------------- Temporary buffers ----------------
+ *
+@@ -526,379 +508,462 @@ typedef struct {
+ typedef struct {
+ __u8 *buffer;
+ int line; /* track from whence this buffer was allocated */
+- int maxLine;
+-} yaffs_TempBuffer;
++ int max_line;
++} yaffs_buffer_t;
+
+ /*----------------- Device ---------------------------------*/
+
+-struct yaffs_DeviceStruct {
+- struct ylist_head devList;
+- const char *name;
+-
+- /* Entry parameters set up way early. Yaffs sets up the rest.*/
+- int nDataBytesPerChunk; /* Should be a power of 2 >= 512 */
+- int nChunksPerBlock; /* does not need to be a power of 2 */
+- int spareBytesPerChunk; /* spare area size */
+- int startBlock; /* Start block we're allowed to use */
+- int endBlock; /* End block we're allowed to use */
+- int nReservedBlocks; /* We want this tuneable so that we can reduce */
+- /* reserved blocks on NOR and RAM. */
+-
+
+- /* Stuff used by the shared space checkpointing mechanism */
+- /* If this value is zero, then this mechanism is disabled */
++struct yaffs_param_s {
++ const YCHAR *name;
+
+-/* int nCheckpointReservedBlocks; */ /* Blocks to reserve for checkpoint data */
++ /*
++ * Entry parameters set up way early. Yaffs sets up the rest.
++ * The structure should be zeroed out before use so that unused
++	 * and default values are zero.
++ */
++
++	int inband_tags;	/* Use inband tags */
++ __u32 total_bytes_per_chunk; /* Should be >= 512, does not need to be a power of 2 */
++ int chunks_per_block; /* does not need to be a power of 2 */
++ int spare_bytes_per_chunk; /* spare area size */
++ int start_block; /* Start block we're allowed to use */
++ int end_block; /* End block we're allowed to use */
++ int n_reserved_blocks; /* We want this tuneable so that we can reduce */
++ /* reserved blocks on NOR and RAM. */
+
+
+- int nShortOpCaches; /* If <= 0, then short op caching is disabled, else
+- * the number of short op caches (don't use too many)
++ int n_caches; /* If <= 0, then short op caching is disabled, else
++ * the number of short op caches (don't use too many).
++ * 10 to 20 is a good bet.
+ */
++ int use_nand_ecc; /* Flag to decide whether or not to use NANDECC on data (yaffs1) */
++ int no_tags_ecc; /* Flag to decide whether or not to do ECC on packed tags (yaffs2) */
+
+- int useHeaderFileSize; /* Flag to determine if we should use file sizes from the header */
++ int is_yaffs2; /* Use yaffs2 mode on this device */
+
+- int useNANDECC; /* Flag to decide whether or not to use NANDECC */
++ int empty_lost_n_found; /* Auto-empty lost+found directory on mount */
+
+- void *genericDevice; /* Pointer to device context
+- * On an mtd this holds the mtd pointer.
+- */
+- void *superBlock;
++ int refresh_period; /* How often we should check to do a block refresh */
++
++ /* Checkpoint control. Can be set before or after initialisation */
++ __u8 skip_checkpt_rd;
++ __u8 skip_checkpt_wr;
++
++ int enable_xattr; /* Enable xattribs */
+
+ /* NAND access functions (Must be set before calling YAFFS)*/
+
+- int (*writeChunkToNAND) (struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 *data,
+- const yaffs_Spare *spare);
+- int (*readChunkFromNAND) (struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, __u8 *data,
+- yaffs_Spare *spare);
+- int (*eraseBlockInNAND) (struct yaffs_DeviceStruct *dev,
+- int blockInNAND);
+- int (*initialiseNAND) (struct yaffs_DeviceStruct *dev);
+- int (*deinitialiseNAND) (struct yaffs_DeviceStruct *dev);
++ int (*write_chunk_fn) (struct yaffs_dev_s *dev,
++ int nand_chunk, const __u8 *data,
++ const yaffs_spare *spare);
++ int (*read_chunk_fn) (struct yaffs_dev_s *dev,
++ int nand_chunk, __u8 *data,
++ yaffs_spare *spare);
++ int (*erase_fn) (struct yaffs_dev_s *dev,
++ int flash_block);
++ int (*initialise_flash_fn) (struct yaffs_dev_s *dev);
++ int (*deinitialise_flash_fn) (struct yaffs_dev_s *dev);
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+- int (*writeChunkWithTagsToNAND) (struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 *data,
+- const yaffs_ExtendedTags *tags);
+- int (*readChunkWithTagsFromNAND) (struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, __u8 *data,
+- yaffs_ExtendedTags *tags);
+- int (*markNANDBlockBad) (struct yaffs_DeviceStruct *dev, int blockNo);
+- int (*queryNANDBlock) (struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *state, __u32 *sequenceNumber);
+-#endif
+-
+- int isYaffs2;
+-
+- /* The removeObjectCallback function must be supplied by OS flavours that
+- * need it. The Linux kernel does not use this, but yaffs direct does use
+- * it to implement the faster readdir
++ int (*write_chunk_tags_fn) (struct yaffs_dev_s *dev,
++ int nand_chunk, const __u8 *data,
++ const yaffs_ext_tags *tags);
++ int (*read_chunk_tags_fn) (struct yaffs_dev_s *dev,
++ int nand_chunk, __u8 *data,
++ yaffs_ext_tags *tags);
++ int (*bad_block_fn) (struct yaffs_dev_s *dev, int block_no);
++ int (*query_block_fn) (struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *state, __u32 *seq_number);
++#endif
++
++ /* The remove_obj_fn function must be supplied by OS flavours that
++ * need it.
++ * yaffs direct uses it to implement the faster readdir.
++	 * Linux uses it to protect the directory during unlinking.
+ */
+- void (*removeObjectCallback)(struct yaffs_ObjectStruct *obj);
++ void (*remove_obj_fn)(struct yaffs_obj_s *obj);
+
+- /* Callback to mark the superblock dirsty */
+- void (*markSuperBlockDirty)(void *superblock);
++ /* Callback to mark the superblock dirty */
++ void (*sb_dirty_fn)(struct yaffs_dev_s *dev);
++
++ /* Callback to control garbage collection. */
++ unsigned (*gc_control)(struct yaffs_dev_s *dev);
++
++ /* Debug control flags. Don't use unless you know what you're doing */
++ int use_header_file_size; /* Flag to determine if we should use file sizes from the header */
++ int disable_lazy_load; /* Disable lazy loading on this device */
++ int wide_tnodes_disabled; /* Set to disable wide tnodes */
++ int disable_soft_del; /* yaffs 1 only: Set to disable the use of softdeletion. */
++
++ int defered_dir_update; /* Set to defer directory updates */
+
+- int wideTnodesDisabled; /* Set to disable wide tnodes */
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ int auto_unicode;
++#endif
++ int always_check_erased; /* Force chunk erased check always on */
++};
+
+- YCHAR *pathDividers; /* String of legal path dividers */
++typedef struct yaffs_param_s yaffs_param_t;
+
++struct yaffs_dev_s {
++ struct yaffs_param_s param;
+
+- /* End of stuff that must be set before initialisation. */
++ /* Context storage. Holds extra OS specific data for this device */
+
+- /* Checkpoint control. Can be set before or after initialisation */
+- __u8 skipCheckpointRead;
+- __u8 skipCheckpointWrite;
++ void *os_context;
++ void *driver_context;
++
++ struct ylist_head dev_list;
+
+ /* Runtime parameters. Set up by YAFFS. */
++ int data_bytes_per_chunk;
+
+- __u16 chunkGroupBits; /* 0 for devices <= 32MB. else log2(nchunks) - 16 */
+- __u16 chunkGroupSize; /* == 2^^chunkGroupBits */
++ /* Non-wide tnode stuff */
++ __u16 chunk_grp_bits; /* Number of bits that need to be resolved if
++ * the tnodes are not wide enough.
++ */
++ __u16 chunk_grp_size; /* == 2^^chunk_grp_bits */
+
+ /* Stuff to support wide tnodes */
+- __u32 tnodeWidth;
+- __u32 tnodeMask;
++ __u32 tnode_width;
++ __u32 tnode_mask;
++ __u32 tnode_size;
+
+ /* Stuff for figuring out file offset to chunk conversions */
+- __u32 chunkShift; /* Shift value */
+- __u32 chunkDiv; /* Divisor after shifting: 1 for power-of-2 sizes */
+- __u32 chunkMask; /* Mask to use for power-of-2 case */
+-
+- /* Stuff to handle inband tags */
+- int inbandTags;
+- __u32 totalBytesPerChunk;
+-
+-#ifdef __KERNEL__
+-
+- struct semaphore sem; /* Semaphore for waiting on erasure.*/
+- struct semaphore grossLock; /* Gross locking semaphore */
+- __u8 *spareBuffer; /* For mtdif2 use. Don't know the size of the buffer
+- * at compile time so we have to allocate it.
+- */
+- void (*putSuperFunc) (struct super_block *sb);
+-#endif
++ __u32 chunk_shift; /* Shift value */
++ __u32 chunk_div; /* Divisor after shifting: 1 for power-of-2 sizes */
++ __u32 chunk_mask; /* Mask to use for power-of-2 case */
+
+- int isMounted;
+
+- int isCheckpointed;
++
++ int is_mounted;
++ int read_only;
++ int is_checkpointed;
+
+
+ /* Stuff to support block offsetting to support start block zero */
+- int internalStartBlock;
+- int internalEndBlock;
+- int blockOffset;
+- int chunkOffset;
++ int internal_start_block;
++ int internal_end_block;
++ int block_offset;
++ int chunk_offset;
+
+
+ /* Runtime checkpointing stuff */
+- int checkpointPageSequence; /* running sequence number of checkpoint pages */
+- int checkpointByteCount;
+- int checkpointByteOffset;
+- __u8 *checkpointBuffer;
+- int checkpointOpenForWrite;
+- int blocksInCheckpoint;
+- int checkpointCurrentChunk;
+- int checkpointCurrentBlock;
+- int checkpointNextBlock;
+- int *checkpointBlockList;
+- int checkpointMaxBlocks;
+- __u32 checkpointSum;
+- __u32 checkpointXor;
++ int checkpt_page_seq; /* running sequence number of checkpoint pages */
++ int checkpt_byte_count;
++ int checkpt_byte_offs;
++ __u8 *checkpt_buffer;
++ int checkpt_open_write;
++ int blocks_in_checkpt;
++ int checkpt_cur_chunk;
++ int checkpt_cur_block;
++ int checkpt_next_block;
++ int *checkpt_block_list;
++ int checkpt_max_blocks;
++ __u32 checkpt_sum;
++ __u32 checkpt_xor;
+
+- int nCheckpointBlocksRequired; /* Number of blocks needed to store current checkpoint set */
++ int checkpoint_blocks_required; /* Number of blocks needed to store current checkpoint set */
+
+ /* Block Info */
+- yaffs_BlockInfo *blockInfo;
+- __u8 *chunkBits; /* bitmap of chunks in use */
+- unsigned blockInfoAlt:1; /* was allocated using alternative strategy */
+- unsigned chunkBitsAlt:1; /* was allocated using alternative strategy */
+- int chunkBitmapStride; /* Number of bytes of chunkBits per block.
+- * Must be consistent with nChunksPerBlock.
++ yaffs_block_info_t *block_info;
++ __u8 *chunk_bits; /* bitmap of chunks in use */
++ unsigned block_info_alt:1; /* was allocated using alternative strategy */
++ unsigned chunk_bits_alt:1; /* was allocated using alternative strategy */
++ int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
++ * Must be consistent with chunks_per_block.
+ */
+
+- int nErasedBlocks;
+- int allocationBlock; /* Current block being allocated off */
+- __u32 allocationPage;
+- int allocationBlockFinder; /* Used to search for next allocation block */
+-
+- /* Runtime state */
+- int nTnodesCreated;
+- yaffs_Tnode *freeTnodes;
+- int nFreeTnodes;
+- yaffs_TnodeList *allocatedTnodeList;
+-
+- int isDoingGC;
+- int gcBlock;
+- int gcChunk;
+-
+- int nObjectsCreated;
+- yaffs_Object *freeObjects;
+- int nFreeObjects;
+-
+- int nHardLinks;
+-
+- yaffs_ObjectList *allocatedObjectList;
+-
+- yaffs_ObjectBucket objectBucket[YAFFS_NOBJECT_BUCKETS];
+-
+- int nFreeChunks;
+-
+- int currentDirtyChecker; /* Used to find current dirtiest block */
+-
+- __u32 *gcCleanupList; /* objects to delete at the end of a GC. */
+- int nonAggressiveSkip; /* GC state/mode */
+-
+- /* Statistcs */
+- int nPageWrites;
+- int nPageReads;
+- int nBlockErasures;
+- int nErasureFailures;
+- int nGCCopies;
+- int garbageCollections;
+- int passiveGarbageCollections;
+- int nRetriedWrites;
+- int nRetiredBlocks;
+- int eccFixed;
+- int eccUnfixed;
+- int tagsEccFixed;
+- int tagsEccUnfixed;
+- int nDeletions;
+- int nUnmarkedDeletions;
+-
+- int hasPendingPrioritisedGCs; /* We think this device might have pending prioritised gcs */
++ int n_erased_blocks;
++ int alloc_block; /* Current block being allocated off */
++ __u32 alloc_page;
++ int alloc_block_finder; /* Used to search for next allocation block */
++
++ /* Object and Tnode memory management */
++ void *allocator;
++ int n_obj;
++ int n_tnodes;
++
++ int n_hardlinks;
++
++ yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
++ __u32 bucket_finder;
++
++ int n_free_chunks;
++
++ /* Garbage collection control */
++ __u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */
++ __u32 n_clean_ups;
++
++ unsigned has_pending_prioritised_gc; /* We think this device might have pending prioritised gcs */
++ unsigned gc_disable;
++ unsigned gc_block_finder;
++ unsigned gc_dirtiest;
++ unsigned gc_pages_in_use;
++ unsigned gc_not_done;
++ unsigned gc_block;
++ unsigned gc_chunk;
++ unsigned gc_skip;
+
+ /* Special directories */
+- yaffs_Object *rootDir;
+- yaffs_Object *lostNFoundDir;
++ yaffs_obj_t *root_dir;
++ yaffs_obj_t *lost_n_found;
+
+ /* Buffer areas for storing data to recover from write failures TODO
+- * __u8 bufferedData[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK];
+- * yaffs_Spare bufferedSpare[YAFFS_CHUNKS_PER_BLOCK];
++ * __u8 buffered_data[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK];
++ * yaffs_spare buffered_spare[YAFFS_CHUNKS_PER_BLOCK];
+ */
+
+- int bufferedBlock; /* Which block is buffered here? */
+- int doingBufferedBlockRewrite;
+-
+- yaffs_ChunkCache *srCache;
+- int srLastUse;
++ int buffered_block; /* Which block is buffered here? */
++ int doing_buffered_block_rewrite;
+
+- int cacheHits;
++ yaffs_cache_t *cache;
++ int cache_last_use;
+
+ /* Stuff for background deletion and unlinked files.*/
+- yaffs_Object *unlinkedDir; /* Directory where unlinked and deleted files live. */
+- yaffs_Object *deletedDir; /* Directory where deleted objects are sent to disappear. */
+- yaffs_Object *unlinkedDeletion; /* Current file being background deleted.*/
+- int nDeletedFiles; /* Count of files awaiting deletion;*/
+- int nUnlinkedFiles; /* Count of unlinked files. */
+- int nBackgroundDeletions; /* Count of background deletions. */
+-
++ yaffs_obj_t *unlinked_dir; /* Directory where unlinked and deleted files live. */
++ yaffs_obj_t *del_dir; /* Directory where deleted objects are sent to disappear. */
++ yaffs_obj_t *unlinked_deletion; /* Current file being background deleted.*/
++ int n_deleted_files; /* Count of files awaiting deletion;*/
++ int n_unlinked_files; /* Count of unlinked files. */
++ int n_bg_deletions; /* Count of background deletions. */
+
+ /* Temporary buffer management */
+- yaffs_TempBuffer tempBuffer[YAFFS_N_TEMP_BUFFERS];
+- int maxTemp;
+- int tempInUse;
+- int unmanagedTempAllocations;
+- int unmanagedTempDeallocations;
++ yaffs_buffer_t temp_buffer[YAFFS_N_TEMP_BUFFERS];
++ int max_temp;
++ int temp_in_use;
++ int unmanaged_buffer_allocs;
++ int unmanaged_buffer_deallocs;
+
+ /* yaffs2 runtime stuff */
+- unsigned sequenceNumber; /* Sequence number of currently allocating block */
+- unsigned oldestDirtySequence;
++ unsigned seq_number; /* Sequence number of currently allocating block */
++ unsigned oldest_dirty_seq;
++ unsigned oldest_dirty_block;
++
++ /* Block refreshing */
++ int refresh_skip; /* A skip down counter. Refresh happens when this gets to zero. */
++
++ /* Dirty directory handling */
++ struct ylist_head dirty_dirs; /* List of dirty directories */
++
++
++	/* Statistics */
++ __u32 n_page_writes;
++ __u32 n_page_reads;
++ __u32 n_erasures;
++ __u32 n_erase_failures;
++ __u32 n_gc_copies;
++ __u32 all_gcs;
++ __u32 passive_gc_count;
++ __u32 oldest_dirty_gc_count;
++ __u32 n_gc_blocks;
++ __u32 bg_gcs;
++ __u32 n_retired_writes;
++ __u32 n_retired_blocks;
++ __u32 n_ecc_fixed;
++ __u32 n_ecc_unfixed;
++ __u32 n_tags_ecc_fixed;
++ __u32 n_tags_ecc_unfixed;
++ __u32 n_deletions;
++ __u32 n_unmarked_deletions;
++ __u32 refresh_count;
++ __u32 cache_hits;
+
+ };
+
+-typedef struct yaffs_DeviceStruct yaffs_Device;
++typedef struct yaffs_dev_s yaffs_dev_t;
+
+ /* The static layout of block usage etc is stored in the super block header */
+ typedef struct {
+ int StructType;
+ int version;
+- int checkpointStartBlock;
+- int checkpointEndBlock;
+- int startBlock;
+- int endBlock;
++ int checkpt_start_block;
++ int checkpt_end_block;
++ int start_block;
++ int end_block;
+ int rfu[100];
+-} yaffs_SuperBlockHeader;
++} yaffs_sb_header;
+
+ /* The CheckpointDevice structure holds the device information that changes at runtime and
+ * must be preserved over unmount/mount cycles.
+ */
+ typedef struct {
+- int structType;
+- int nErasedBlocks;
+- int allocationBlock; /* Current block being allocated off */
+- __u32 allocationPage;
+- int nFreeChunks;
+-
+- int nDeletedFiles; /* Count of files awaiting deletion;*/
+- int nUnlinkedFiles; /* Count of unlinked files. */
+- int nBackgroundDeletions; /* Count of background deletions. */
++ int struct_type;
++ int n_erased_blocks;
++ int alloc_block; /* Current block being allocated off */
++ __u32 alloc_page;
++ int n_free_chunks;
++
++ int n_deleted_files; /* Count of files awaiting deletion;*/
++ int n_unlinked_files; /* Count of unlinked files. */
++ int n_bg_deletions; /* Count of background deletions. */
+
+ /* yaffs2 runtime stuff */
+- unsigned sequenceNumber; /* Sequence number of currently allocating block */
+- unsigned oldestDirtySequence;
++ unsigned seq_number; /* Sequence number of currently allocating block */
+
+-} yaffs_CheckpointDevice;
++} yaffs_checkpt_dev_t;
+
+
+ typedef struct {
+- int structType;
++ int struct_type;
+ __u32 magic;
+ __u32 version;
+ __u32 head;
+-} yaffs_CheckpointValidity;
++} yaffs_checkpt_validty_t;
++
++
++struct yaffs_shadow_fixer_s {
++ int obj_id;
++ int shadowed_id;
++ struct yaffs_shadow_fixer_s *next;
++};
++
++/* Structure for doing xattr modifications */
++typedef struct {
++ int set; /* If 0 then this is a deletion */
++ const YCHAR *name;
++ const void *data;
++ int size;
++ int flags;
++ int result;
++} yaffs_xattr_mod;
+
+
+ /*----------------------- YAFFS Functions -----------------------*/
+
+-int yaffs_GutsInitialise(yaffs_Device *dev);
+-void yaffs_Deinitialise(yaffs_Device *dev);
++int yaffs_guts_initialise(yaffs_dev_t *dev);
++void yaffs_deinitialise(yaffs_dev_t *dev);
+
+-int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev);
++int yaffs_get_n_free_chunks(yaffs_dev_t *dev);
+
+-int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName,
+- yaffs_Object *newDir, const YCHAR *newName);
++int yaffs_rename_obj(yaffs_obj_t *old_dir, const YCHAR *old_name,
++ yaffs_obj_t *new_dir, const YCHAR *new_name);
+
+-int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name);
+-int yaffs_DeleteObject(yaffs_Object *obj);
++int yaffs_unlinker(yaffs_obj_t *dir, const YCHAR *name);
++int yaffs_del_obj(yaffs_obj_t *obj);
+
+-int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize);
+-int yaffs_GetObjectFileLength(yaffs_Object *obj);
+-int yaffs_GetObjectInode(yaffs_Object *obj);
+-unsigned yaffs_GetObjectType(yaffs_Object *obj);
+-int yaffs_GetObjectLinkCount(yaffs_Object *obj);
++int yaffs_get_obj_name(yaffs_obj_t *obj, YCHAR *name, int buffer_size);
++int yaffs_get_obj_length(yaffs_obj_t *obj);
++int yaffs_get_obj_inode(yaffs_obj_t *obj);
++unsigned yaffs_get_obj_type(yaffs_obj_t *obj);
++int yaffs_get_obj_link_count(yaffs_obj_t *obj);
+
+-int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr);
+-int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr);
++int yaffs_set_attribs(yaffs_obj_t *obj, struct iattr *attr);
++int yaffs_get_attribs(yaffs_obj_t *obj, struct iattr *attr);
+
+ /* File operations */
+-int yaffs_ReadDataFromFile(yaffs_Object *obj, __u8 *buffer, loff_t offset,
+- int nBytes);
+-int yaffs_WriteDataToFile(yaffs_Object *obj, const __u8 *buffer, loff_t offset,
+- int nBytes, int writeThrough);
+-int yaffs_ResizeFile(yaffs_Object *obj, loff_t newSize);
++int yaffs_file_rd(yaffs_obj_t *obj, __u8 *buffer, loff_t offset,
++ int n_bytes);
++int yaffs_wr_file(yaffs_obj_t *obj, const __u8 *buffer, loff_t offset,
++ int n_bytes, int write_trhrough);
++int yaffs_resize_file(yaffs_obj_t *obj, loff_t new_size);
+
+-yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name,
++yaffs_obj_t *yaffs_create_file(yaffs_obj_t *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid);
+-int yaffs_FlushFile(yaffs_Object *obj, int updateTime);
++
++int yaffs_flush_file(yaffs_obj_t *obj, int update_time, int data_sync);
+
+ /* Flushing and checkpointing */
+-void yaffs_FlushEntireDeviceCache(yaffs_Device *dev);
++void yaffs_flush_whole_cache(yaffs_dev_t *dev);
+
+-int yaffs_CheckpointSave(yaffs_Device *dev);
+-int yaffs_CheckpointRestore(yaffs_Device *dev);
++int yaffs_checkpoint_save(yaffs_dev_t *dev);
++int yaffs_checkpoint_restore(yaffs_dev_t *dev);
+
+ /* Directory operations */
+-yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name,
++yaffs_obj_t *yaffs_create_dir(yaffs_obj_t *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid);
+-yaffs_Object *yaffs_FindObjectByName(yaffs_Object *theDir, const YCHAR *name);
+-int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir,
+- int (*fn) (yaffs_Object *));
++yaffs_obj_t *yaffs_find_by_name(yaffs_obj_t *the_dir, const YCHAR *name);
++int yaffs_ApplyToDirectoryChildren(yaffs_obj_t *the_dir,
++ int (*fn) (yaffs_obj_t *));
+
+-yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number);
++yaffs_obj_t *yaffs_find_by_number(yaffs_dev_t *dev, __u32 number);
+
+ /* Link operations */
+-yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name,
+- yaffs_Object *equivalentObject);
++yaffs_obj_t *yaffs_link_obj(yaffs_obj_t *parent, const YCHAR *name,
++ yaffs_obj_t *equiv_obj);
+
+-yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj);
++yaffs_obj_t *yaffs_get_equivalent_obj(yaffs_obj_t *obj);
+
+ /* Symlink operations */
+-yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name,
++yaffs_obj_t *yaffs_create_symlink(yaffs_obj_t *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid,
+ const YCHAR *alias);
+-YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj);
++YCHAR *yaffs_get_symlink_alias(yaffs_obj_t *obj);
+
+ /* Special inodes (fifos, sockets and devices) */
+-yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name,
++yaffs_obj_t *yaffs_create_special(yaffs_obj_t *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid, __u32 rdev);
+
++
++int yaffs_set_xattrib(yaffs_obj_t *obj, const YCHAR *name, const void * value, int size, int flags);
++int yaffs_get_xattrib(yaffs_obj_t *obj, const YCHAR *name, void *value, int size);
++int yaffs_list_xattrib(yaffs_obj_t *obj, char *buffer, int size);
++int yaffs_remove_xattrib(yaffs_obj_t *obj, const YCHAR *name);
++
+ /* Special directories */
+-yaffs_Object *yaffs_Root(yaffs_Device *dev);
+-yaffs_Object *yaffs_LostNFound(yaffs_Device *dev);
++yaffs_obj_t *yaffs_root(yaffs_dev_t *dev);
++yaffs_obj_t *yaffs_lost_n_found(yaffs_dev_t *dev);
+
+ #ifdef CONFIG_YAFFS_WINCE
+ /* CONFIG_YAFFS_WINCE special stuff */
+-void yfsd_WinFileTimeNow(__u32 target[2]);
++void yfsd_win_file_time_now(__u32 target[2]);
+ #endif
+
+-#ifdef __KERNEL__
++void yaffs_handle_defered_free(yaffs_obj_t *obj);
+
+-void yaffs_HandleDeferedFree(yaffs_Object *obj);
+-#endif
++void yaffs_update_dirty_dirs(yaffs_dev_t *dev);
++
++int yaffs_bg_gc(yaffs_dev_t *dev, unsigned urgency);
+
+ /* Debug dump */
+-int yaffs_DumpObject(yaffs_Object *obj);
++int yaffs_dump_obj(yaffs_obj_t *obj);
+
+-void yaffs_GutsTest(yaffs_Device *dev);
++void yaffs_guts_test(yaffs_dev_t *dev);
+
+-/* A few useful functions */
+-void yaffs_InitialiseTags(yaffs_ExtendedTags *tags);
+-void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn);
+-int yaffs_CheckFF(__u8 *buffer, int nBytes);
+-void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi);
++/* A few useful functions to be used within the core files*/
++void yaffs_chunk_del(yaffs_dev_t *dev, int chunk_id, int mark_flash, int lyn);
++int yaffs_check_ff(__u8 *buffer, int n_bytes);
++void yaffs_handle_chunk_error(yaffs_dev_t *dev, yaffs_block_info_t *bi);
++
++__u8 *yaffs_get_temp_buffer(yaffs_dev_t *dev, int line_no);
++void yaffs_release_temp_buffer(yaffs_dev_t *dev, __u8 *buffer, int line_no);
++
++yaffs_obj_t *yaffs_find_or_create_by_number(yaffs_dev_t *dev,
++ int number,
++ yaffs_obj_type type);
++int yaffs_put_chunk_in_file(yaffs_obj_t *in, int inode_chunk,
++ int nand_chunk, int in_scan);
++void yaffs_set_obj_name(yaffs_obj_t *obj, const YCHAR *name);
++void yaffs_set_obj_name_from_oh(yaffs_obj_t *obj, const yaffs_obj_header *oh);
++void yaffs_add_obj_to_dir(yaffs_obj_t *directory,
++ yaffs_obj_t *obj);
++YCHAR *yaffs_clone_str(const YCHAR *str);
++void yaffs_link_fixup(yaffs_dev_t *dev, yaffs_obj_t *hard_list);
++void yaffs_block_became_dirty(yaffs_dev_t *dev, int block_no);
++int yaffs_update_oh(yaffs_obj_t *in, const YCHAR *name,
++ int force, int is_shrink, int shadows,
++ yaffs_xattr_mod *xop);
++void yaffs_handle_shadowed_obj(yaffs_dev_t *dev, int obj_id,
++ int backward_scanning);
++int yaffs_check_alloc_available(yaffs_dev_t *dev, int n_chunks);
++yaffs_tnode_t *yaffs_get_tnode(yaffs_dev_t *dev);
++yaffs_tnode_t *yaffs_add_find_tnode_0(yaffs_dev_t *dev,
++ yaffs_file_s *file_struct,
++ __u32 chunk_id,
++ yaffs_tnode_t *passed_tn);
++
++int yaffs_do_file_wr(yaffs_obj_t *in, const __u8 *buffer, loff_t offset,
++ int n_bytes, int write_trhrough);
++void yaffs_resize_file_down( yaffs_obj_t *obj, loff_t new_size);
++void yaffs_skip_rest_of_block(yaffs_dev_t *dev);
++
++int yaffs_count_free_chunks(yaffs_dev_t *dev);
++
++yaffs_tnode_t *yaffs_find_tnode_0(yaffs_dev_t *dev,
++ yaffs_file_s *file_struct,
++ __u32 chunk_id);
+
+-__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo);
+-void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer, int lineNo);
++__u32 yaffs_get_group_base(yaffs_dev_t *dev, yaffs_tnode_t *tn, unsigned pos);
+
+ #endif
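
For orientation, a minimal sketch (illustrative only, not part of the patch) of how a flash driver is now expected to fill in the renamed dev->param block and the tagged NAND hooks declared above before calling yaffs_guts_initialise(). The my_* callbacks and the geometry values are invented for the example, and CONFIG_YAFFS_YAFFS2 is assumed so that the tagged function pointers exist.

#include "yaffs_guts.h"

/* Hypothetical driver callbacks -- the names and bodies are not part of yaffs */
extern int my_write_chunk_tags(struct yaffs_dev_s *dev, int nand_chunk,
			const __u8 *data, const yaffs_ext_tags *tags);
extern int my_read_chunk_tags(struct yaffs_dev_s *dev, int nand_chunk,
			__u8 *data, yaffs_ext_tags *tags);
extern int my_erase(struct yaffs_dev_s *dev, int flash_block);
extern int my_mark_bad(struct yaffs_dev_s *dev, int block_no);
extern int my_query_block(struct yaffs_dev_s *dev, int block_no,
			yaffs_block_state_t *state, __u32 *seq_number);
extern int my_init_flash(struct yaffs_dev_s *dev);

static yaffs_dev_t example_dev;	/* static storage, so unused parameters stay zero */

static int example_mount(void)
{
	/* Geometry for an imaginary 128MiB large-page NAND */
	example_dev.param.total_bytes_per_chunk = 2048;
	example_dev.param.chunks_per_block = 64;
	example_dev.param.spare_bytes_per_chunk = 64;
	example_dev.param.start_block = 0;
	example_dev.param.end_block = 1023;
	example_dev.param.n_reserved_blocks = 5;
	example_dev.param.n_caches = 10;	/* short op cache; 10 to 20 suggested above */
	example_dev.param.is_yaffs2 = 1;

	/* Tagged NAND access hooks */
	example_dev.param.write_chunk_tags_fn = my_write_chunk_tags;
	example_dev.param.read_chunk_tags_fn = my_read_chunk_tags;
	example_dev.param.erase_fn = my_erase;
	example_dev.param.bad_block_fn = my_mark_bad;
	example_dev.param.query_block_fn = my_query_block;
	example_dev.param.initialise_flash_fn = my_init_flash;

	return (yaffs_guts_initialise(&example_dev) == YAFFS_OK) ? 0 : -1;
}
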
+--- a/fs/yaffs2/yaffsinterface.h
++++ b/fs/yaffs2/yaffsinterface.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -16,6 +16,6 @@
+ #ifndef __YAFFSINTERFACE_H__
+ #define __YAFFSINTERFACE_H__
+
+-int yaffs_Initialise(unsigned nBlocks);
++int yaffs_initialise(unsigned nBlocks);
+
+ #endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_linux_allocator.c
+@@ -0,0 +1,200 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ *
++ * Note: This code is currently unused. It is being checked in in case it becomes useful.
++ */
++
++
++#include "yaffs_allocator.h"
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yportenv.h"
++#include "yaffs_linux.h"
++/*
++ * Start out with the same allocator as yaffs direct.
++ * Todo: Change to Linux slab allocator.
++ */
++
++
++
++#define NAMELEN 20
++struct yaffs_AllocatorStruct {
++ char tnode_name[NAMELEN+1];
++ char object_name[NAMELEN+1];
++ struct kmem_cache *tnode_cache;
++ struct kmem_cache *object_cache;
++};
++
++typedef struct yaffs_AllocatorStruct yaffs_Allocator;
++
++int mount_id;
++
++void yaffs_deinit_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = (yaffs_Allocator *)dev->allocator;
++
++ T(YAFFS_TRACE_ALLOCATE,(TSTR("Deinitialising yaffs allocator\n")));
++
++ if(allocator){
++ if(allocator->tnode_cache){
++ kmem_cache_destroy(allocator->tnode_cache);
++ allocator->tnode_cache = NULL;
++ } else {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("NULL tnode cache\n")));
++ YBUG();
++ }
++
++ if(allocator->object_cache){
++ kmem_cache_destroy(allocator->object_cache);
++ allocator->object_cache = NULL;
++ } else {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("NULL object cache\n")));
++ YBUG();
++ }
++
++ YFREE(allocator);
++
++ } else {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("Deinitialising NULL allocator\n")));
++ YBUG();
++ }
++ dev->allocator = NULL;
++}
++
++
++static void fake_ctor0(void *data){data = data;}
++static void fake_ctor1(void *data){data = data;}
++static void fake_ctor2(void *data){data = data;}
++static void fake_ctor3(void *data){data = data;}
++static void fake_ctor4(void *data){data = data;}
++static void fake_ctor5(void *data){data = data;}
++static void fake_ctor6(void *data){data = data;}
++static void fake_ctor7(void *data){data = data;}
++static void fake_ctor8(void *data){data = data;}
++static void fake_ctor9(void *data){data = data;}
++
++static void (*fake_ctor_list[10]) (void *) = {
++ fake_ctor0,
++ fake_ctor1,
++ fake_ctor2,
++ fake_ctor3,
++ fake_ctor4,
++ fake_ctor5,
++ fake_ctor6,
++ fake_ctor7,
++ fake_ctor8,
++ fake_ctor9,
++};
++
++void yaffs_init_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator;
++ unsigned mount_id = yaffs_dev_to_lc(dev)->mount_id;
++
++ T(YAFFS_TRACE_ALLOCATE,(TSTR("Initialising yaffs allocator\n")));
++
++ if(dev->allocator)
++ YBUG();
++ else if(mount_id >= 10){
++ T(YAFFS_TRACE_ALWAYS,(TSTR("Bad mount_id %u\n"),mount_id));
++ } else {
++		allocator = YMALLOC(sizeof(yaffs_Allocator));
++
++		if(!allocator){
++			T(YAFFS_TRACE_ALWAYS,
++				(TSTR("yaffs allocator creation failed\n")));
++			YBUG();
++			return;
++		}
++
++		memset(allocator,0,sizeof(yaffs_Allocator));
++		dev->allocator = allocator;
++
++ sprintf(allocator->tnode_name,"yaffs_t_%u",mount_id);
++ sprintf(allocator->object_name,"yaffs_o_%u",mount_id);
++
++ allocator->tnode_cache =
++ kmem_cache_create(allocator->tnode_name,
++ dev->tnode_size,
++ 0, 0,
++ fake_ctor_list[mount_id]);
++ if(allocator->tnode_cache)
++ T(YAFFS_TRACE_ALLOCATE,
++ (TSTR("tnode cache \"%s\" %p\n"),
++ allocator->tnode_name,allocator->tnode_cache));
++ else {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs cache creation failed\n")));
++ YBUG();
++ }
++
++
++ allocator->object_cache =
++ kmem_cache_create(allocator->object_name,
++ sizeof(yaffs_obj_t),
++ 0, 0,
++	__u8 xattr_known:1;	/* We know if this object has xattribs or not. */
++
++ if(allocator->object_cache)
++ T(YAFFS_TRACE_ALLOCATE,
++ (TSTR("object cache \"%s\" %p\n"),
++ allocator->object_name,allocator->object_cache));
++
++ else {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs cache creation failed\n")));
++ YBUG();
++ }
++ }
++}
++
++
++yaffs_tnode_t *yaffs_alloc_raw_tnode(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++ if(!allocator || !allocator->tnode_cache){
++ YBUG();
++ return NULL;
++ }
++ return kmem_cache_alloc(allocator->tnode_cache, GFP_NOFS);
++}
++
++void yaffs_free_raw_tnode(yaffs_dev_t *dev, yaffs_tnode_t *tn)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++ kmem_cache_free(allocator->tnode_cache,tn);
++}
++
++yaffs_obj_t *yaffs_alloc_raw_obj(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++ if(!allocator){
++ YBUG();
++ return NULL;
++ }
++ if(!allocator->object_cache){
++ YBUG();
++ return NULL;
++ }
++ return kmem_cache_alloc(allocator->object_cache, GFP_NOFS);
++}
++
++void yaffs_free_raw_obj(yaffs_dev_t *dev, yaffs_obj_t *obj)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++ kmem_cache_free(allocator->object_cache,obj);
++}
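
Illustrative only: the per-mount caches above (one "yaffs_t_<mount_id>"/"yaffs_o_<mount_id>" pair per mount, which is why mount_id is limited to the ten entries of fake_ctor_list) are reached through the raw alloc/free entry points, and callers must handle a NULL return. The declarations are assumed to come from yaffs_allocator.h, as included at the top of this file.

#include "yaffs_guts.h"
#include "yaffs_allocator.h"

static yaffs_obj_t *example_new_obj(yaffs_dev_t *dev)
{
	/* NULL if the object cache is missing or kmem_cache_alloc fails;
	 * the caller initialises the object fields afterwards. */
	return yaffs_alloc_raw_obj(dev);
}

static void example_free_obj(yaffs_dev_t *dev, yaffs_obj_t *obj)
{
	yaffs_free_raw_obj(dev, obj);
}
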
+--- /dev/null
++++ b/fs/yaffs2/yaffs_linux.h
+@@ -0,0 +1,43 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_LINUX_H__
++#define __YAFFS_LINUX_H__
++
++#include "devextras.h"
++#include "yportenv.h"
++
++struct yaffs_LinuxContext {
++ struct ylist_head contextList; /* List of these we have mounted */
++ struct yaffs_dev_s *dev;
++ struct super_block * superBlock;
++ struct task_struct *bgThread; /* Background thread for this device */
++ int bgRunning;
++ struct semaphore grossLock; /* Gross locking semaphore */
++ __u8 *spareBuffer; /* For mtdif2 use. Don't know the size of the buffer
++ * at compile time so we have to allocate it.
++ */
++ struct ylist_head searchContexts;
++ void (*putSuperFunc)(struct super_block *sb);
++
++ struct task_struct *readdirProcess;
++ unsigned mount_id;
++};
++
++#define yaffs_dev_to_lc(dev) ((struct yaffs_LinuxContext *)((dev)->os_context))
++#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
++
++#endif
++
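
The two macros above replace the old pattern of keeping Linux-specific fields (superblock, gross lock, spare buffer) directly inside the device structure: the per-mount context and the mtd are now recovered from the opaque os_context and driver_context pointers. A short illustrative sketch of the intended usage, mirroring how yaffs_mtdif1.c and the allocator use these macros elsewhere in this patch:

#include "linux/mtd/mtd.h"
#include "yaffs_guts.h"
#include "yaffs_linux.h"

static struct mtd_info *example_mtd_of(yaffs_dev_t *dev)
{
	/* driver_context carries the mtd pointer for mtd-backed devices */
	return yaffs_dev_to_mtd(dev);
}

static unsigned example_mount_id_of(yaffs_dev_t *dev)
{
	/* os_context carries the struct yaffs_LinuxContext for this mount */
	return yaffs_dev_to_lc(dev)->mount_id;
}
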
+--- /dev/null
++++ b/fs/yaffs2/yaffs_list.h
+@@ -0,0 +1,127 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/*
++ * This file just holds extra declarations of macros that would normally
++ * be provided in the Linux kernel. These macros have been written from
++ * scratch but are functionally equivalent to the Linux ones.
++ *
++ */
++
++#ifndef __YAFFS_LIST_H__
++#define __YAFFS_LIST_H__
++
++
++#include "yportenv.h"
++
++/*
++ * This is a simple doubly linked list implementation that matches the
++ * way the Linux kernel doubly linked list implementation works.
++ */
++
++struct ylist_head {
++ struct ylist_head *next; /* next in chain */
++ struct ylist_head *prev; /* previous in chain */
++};
++
++
++/* Initialise a static list */
++#define YLIST_HEAD(name) \
++struct ylist_head name = { &(name), &(name)}
++
++
++
++/* Initialise a list head to an empty list */
++#define YINIT_LIST_HEAD(p) \
++do { \
++ (p)->next = (p);\
++ (p)->prev = (p); \
++} while (0)
++
++
++/* Add an element to a list */
++static Y_INLINE void ylist_add(struct ylist_head *newEntry,
++ struct ylist_head *list)
++{
++ struct ylist_head *listNext = list->next;
++
++ list->next = newEntry;
++ newEntry->prev = list;
++ newEntry->next = listNext;
++ listNext->prev = newEntry;
++
++}
++
++static Y_INLINE void ylist_add_tail(struct ylist_head *newEntry,
++ struct ylist_head *list)
++{
++ struct ylist_head *listPrev = list->prev;
++
++ list->prev = newEntry;
++ newEntry->next = list;
++ newEntry->prev = listPrev;
++ listPrev->next = newEntry;
++
++}
++
++
++/* Take an element out of its current list, with or without
++ * reinitialising the links of the entry. */
++static Y_INLINE void ylist_del(struct ylist_head *entry)
++{
++ struct ylist_head *listNext = entry->next;
++ struct ylist_head *listPrev = entry->prev;
++
++ listNext->prev = listPrev;
++ listPrev->next = listNext;
++
++}
++
++static Y_INLINE void ylist_del_init(struct ylist_head *entry)
++{
++ ylist_del(entry);
++ entry->next = entry->prev = entry;
++}
++
++
++/* Test if the list is empty */
++static Y_INLINE int ylist_empty(struct ylist_head *entry)
++{
++ return (entry->next == entry);
++}
++
++
++/* ylist_entry takes a pointer to a list entry and offsets it so that
++ * we can find a pointer to the object it is embedded in.
++ */
++
++
++#define ylist_entry(entry, type, member) \
++ ((type *)((char *)(entry)-(unsigned long)(&((type *)NULL)->member)))
++
++
++/* ylist_for_each and ylist_for_each_safe iterate over lists.
++ * ylist_for_each_safe uses temporary storage to make the list delete-safe.
++ */
++
++#define ylist_for_each(itervar, list) \
++ for (itervar = (list)->next; itervar != (list); itervar = itervar->next)
++
++#define ylist_for_each_safe(itervar, saveVar, list) \
++ for (itervar = (list)->next, saveVar = (list)->next->next; \
++ itervar != (list); itervar = saveVar, saveVar = saveVar->next)
++
++
++#endif
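
A small illustrative example (not part of the patch) of how the ylist helpers above are meant to be used: a ylist_head link embedded in the containing structure, ylist_entry to get back from the link to that structure, and the _safe iterator when entries may be unlinked during the walk.

#include "yaffs_list.h"

struct example_node {
	int value;
	struct ylist_head link;		/* embedded list link */
};

static YLIST_HEAD(example_list);	/* statically initialised empty list */

static void example_add(struct example_node *node)
{
	ylist_add_tail(&node->link, &example_list);
}

static void example_prune_negative(void)
{
	struct ylist_head *i;
	struct ylist_head *n;
	struct example_node *node;

	/* the _safe variant tolerates removing the current entry */
	ylist_for_each_safe(i, n, &example_list) {
		node = ylist_entry(i, struct example_node, link);
		if (node->value < 0)
			ylist_del_init(i);
	}
}
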
+--- a/fs/yaffs2/yaffs_mtdif1.c
++++ b/fs/yaffs2/yaffs_mtdif1.c
+@@ -2,7 +2,7 @@
+ * YAFFS: Yet another FFS. A NAND-flash specific file system.
+ * yaffs_mtdif1.c NAND mtd interface functions for small-page NAND.
+ *
+- * Copyright (C) 2002 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -18,15 +18,17 @@
+ *
+ * These functions are invoked via function pointers in yaffs_nand.c.
+ * This replaces functionality provided by functions in yaffs_mtdif.c
+- * and the yaffs_TagsCompatability functions in yaffs_tagscompat.c that are
++ * and the tags compatibility functions in yaffs_tagscompat.c that are
+ * called in yaffs_mtdif.c when the function pointers are NULL.
+- * We assume the MTD layer is performing ECC (useNANDECC is true).
++ * We assume the MTD layer is performing ECC (use_nand_ecc is true).
+ */
+
+ #include "yportenv.h"
++#include "yaffs_trace.h"
+ #include "yaffs_guts.h"
+ #include "yaffs_packedtags1.h"
+-#include "yaffs_tagscompat.h" /* for yaffs_CalcTagsECC */
++#include "yaffs_tagscompat.h" /* for yaffs_calc_tags_ecc */
++#include "yaffs_linux.h"
+
+ #include "linux/kernel.h"
+ #include "linux/version.h"
+@@ -36,8 +38,6 @@
+ /* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
+ #if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+
+-const char *yaffs_mtdif1_c_version = "$Id: yaffs_mtdif1.c,v 1.10 2009-03-09 07:41:10 charles Exp $";
+-
+ #ifndef CONFIG_YAFFS_9BYTE_TAGS
+ # define YTAG1_SIZE 8
+ #else
+@@ -51,12 +51,12 @@ const char *yaffs_mtdif1_c_version = "$I
+ * adjust 'oobfree' to match your existing Yaffs data.
+ *
+ * This nand_ecclayout scatters/gathers to/from the old-yaffs layout with the
+- * pageStatus byte (at NAND spare offset 4) scattered/gathered from/to
++ * page_status byte (at NAND spare offset 4) scattered/gathered from/to
+ * the 9th byte.
+ *
+ * Old-style on-NAND format: T0,T1,T2,T3,P,B,T4,T5,E0,E1,E2,T6,T7,E3,E4,E5
+- * We have/need PackedTags1 plus pageStatus: T0,T1,T2,T3,T4,T5,T6,T7,P
+- * where Tn are the tag bytes, En are MTD's ECC bytes, P is the pageStatus
++ * We have/need PackedTags1 plus page_status: T0,T1,T2,T3,T4,T5,T6,T7,P
++ * where Tn are the tag bytes, En are MTD's ECC bytes, P is the page_status
+ * byte and B is the small-page bad-block indicator byte.
+ */
+ static struct nand_ecclayout nand_oob_16 = {
+@@ -88,42 +88,40 @@ static struct nand_ecclayout nand_oob_16
+ * Any underlying MTD error results in YAFFS_FAIL.
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+-int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND, const __u8 *data, const yaffs_ExtendedTags *etags)
++int nandmtd1_WriteChunkWithTagsToNAND(yaffs_dev_t *dev,
++ int nand_chunk, const __u8 *data, const yaffs_ext_tags *etags)
+ {
+- struct mtd_info *mtd = dev->genericDevice;
+- int chunkBytes = dev->nDataBytesPerChunk;
+- loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int chunkBytes = dev->data_bytes_per_chunk;
++ loff_t addr = ((loff_t)nand_chunk) * chunkBytes;
+ struct mtd_oob_ops ops;
+ yaffs_PackedTags1 pt1;
+ int retval;
+
+- /* we assume that PackedTags1 and yaffs_Tags are compatible */
++ /* we assume that PackedTags1 and yaffs_tags_t are compatible */
+ compile_time_assertion(sizeof(yaffs_PackedTags1) == 12);
+- compile_time_assertion(sizeof(yaffs_Tags) == 8);
+-
+- dev->nPageWrites++;
++ compile_time_assertion(sizeof(yaffs_tags_t) == 8);
+
+ yaffs_PackTags1(&pt1, etags);
+- yaffs_CalcTagsECC((yaffs_Tags *)&pt1);
++ yaffs_calc_tags_ecc((yaffs_tags_t *)&pt1);
+
+ /* When deleting a chunk, the upper layer provides only skeletal
+- * etags, one with chunkDeleted set. However, we need to update the
++ * etags, one with is_deleted set. However, we need to update the
+ * tags, not erase them completely. So we use the NAND write property
+ * that only zeroed-bits stick and set tag bytes to all-ones and
+ * zero just the (not) deleted bit.
+ */
+ #ifndef CONFIG_YAFFS_9BYTE_TAGS
+- if (etags->chunkDeleted) {
++ if (etags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+ /* clear delete status bit to indicate deleted */
+ pt1.deleted = 0;
+ }
+ #else
+ ((__u8 *)&pt1)[8] = 0xff;
+- if (etags->chunkDeleted) {
++ if (etags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+- /* zero pageStatus byte to indicate deleted */
++ /* zero page_status byte to indicate deleted */
+ ((__u8 *)&pt1)[8] = 0;
+ }
+ #endif
+@@ -137,20 +135,20 @@ int nandmtd1_WriteChunkWithTagsToNAND(ya
+
+ retval = mtd->write_oob(mtd, addr, &ops);
+ if (retval) {
+- yaffs_trace(YAFFS_TRACE_MTD,
+- "write_oob failed, chunk %d, mtd error %d\n",
+- chunkInNAND, retval);
++ T(YAFFS_TRACE_MTD,
++ (TSTR("write_oob failed, chunk %d, mtd error %d"TENDSTR),
++ nand_chunk, retval));
+ }
+ return retval ? YAFFS_FAIL : YAFFS_OK;
+ }
+
+-/* Return with empty ExtendedTags but add eccResult.
++/* Return with empty ExtendedTags but add ecc_result.
+ */
+-static int rettags(yaffs_ExtendedTags *etags, int eccResult, int retval)
++static int rettags(yaffs_ext_tags *etags, int ecc_result, int retval)
+ {
+ if (etags) {
+ memset(etags, 0, sizeof(*etags));
+- etags->eccResult = eccResult;
++ etags->ecc_result = ecc_result;
+ }
+ return retval;
+ }
+@@ -158,30 +156,28 @@ static int rettags(yaffs_ExtendedTags *e
+ /* Read a chunk (page) from NAND.
+ *
+ * Caller expects ExtendedTags data to be usable even on error; that is,
+- * all members except eccResult and blockBad are zeroed.
++ * all members except ecc_result and block_bad are zeroed.
+ *
+ * - Check ECC results for data (if applicable)
+ * - Check for blank/erased block (return empty ExtendedTags if blank)
+ * - Check the PackedTags1 mini-ECC (correct if necessary/possible)
+ * - Convert PackedTags1 to ExtendedTags
+- * - Update eccResult and blockBad members to refect state.
++ * - Update ecc_result and block_bad members to reflect state.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+-int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev,
+- int chunkInNAND, __u8 *data, yaffs_ExtendedTags *etags)
++int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_dev_t *dev,
++ int nand_chunk, __u8 *data, yaffs_ext_tags *etags)
+ {
+- struct mtd_info *mtd = dev->genericDevice;
+- int chunkBytes = dev->nDataBytesPerChunk;
+- loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int chunkBytes = dev->data_bytes_per_chunk;
++ loff_t addr = ((loff_t)nand_chunk) * chunkBytes;
+ int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+ struct mtd_oob_ops ops;
+ yaffs_PackedTags1 pt1;
+ int retval;
+ int deleted;
+
+- dev->nPageReads++;
+-
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunkBytes : 0;
+@@ -200,9 +196,9 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ */
+ retval = mtd->read_oob(mtd, addr, &ops);
+ if (retval) {
+- yaffs_trace(YAFFS_TRACE_MTD,
+- "read_oob failed, chunk %d, mtd error %d\n",
+- chunkInNAND, retval);
++ T(YAFFS_TRACE_MTD,
++ (TSTR("read_oob failed, chunk %d, mtd error %d"TENDSTR),
++ nand_chunk, retval));
+ }
+
+ switch (retval) {
+@@ -213,23 +209,23 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ case -EUCLEAN:
+ /* MTD's ECC fixed the data */
+ eccres = YAFFS_ECC_RESULT_FIXED;
+- dev->eccFixed++;
++ dev->n_ecc_fixed++;
+ break;
+
+ case -EBADMSG:
+ /* MTD's ECC could not fix the data */
+- dev->eccUnfixed++;
++ dev->n_ecc_unfixed++;
+ /* fall into... */
+ default:
+ rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
+- etags->blockBad = (mtd->block_isbad)(mtd, addr);
++ etags->block_bad = (mtd->block_isbad)(mtd, addr);
+ return YAFFS_FAIL;
+ }
+
+ /* Check for a blank/erased chunk.
+ */
+- if (yaffs_CheckFF((__u8 *)&pt1, 8)) {
+- /* when blank, upper layers want eccResult to be <= NO_ERROR */
++ if (yaffs_check_ff((__u8 *)&pt1, 8)) {
++ /* when blank, upper layers want ecc_result to be <= NO_ERROR */
+ return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
+ }
+
+@@ -241,37 +237,37 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ deleted = !pt1.deleted;
+ pt1.deleted = 1;
+ #else
+- deleted = (yaffs_CountBits(((__u8 *)&pt1)[8]) < 7);
++ deleted = (yaffs_count_bits(((__u8 *)&pt1)[8]) < 7);
+ #endif
+
+ /* Check the packed tags mini-ECC and correct if necessary/possible.
+ */
+- retval = yaffs_CheckECCOnTags((yaffs_Tags *)&pt1);
++ retval = yaffs_check_tags_ecc((yaffs_tags_t *)&pt1);
+ switch (retval) {
+ case 0:
+ /* no tags error, use MTD result */
+ break;
+ case 1:
+ /* recovered tags-ECC error */
+- dev->tagsEccFixed++;
++ dev->n_tags_ecc_fixed++;
+ if (eccres == YAFFS_ECC_RESULT_NO_ERROR)
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ break;
+ default:
+ /* unrecovered tags-ECC error */
+- dev->tagsEccUnfixed++;
++ dev->n_tags_ecc_unfixed++;
+ return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
+ }
+
+ /* Unpack the tags to extended form and set ECC result.
+- * [set shouldBeFF just to keep yaffs_UnpackTags1 happy]
++ * [set shouldBeFF just to keep yaffs_unpack_tags1 happy]
+ */
+ pt1.shouldBeFF = 0xFFFFFFFF;
+- yaffs_UnpackTags1(etags, &pt1);
+- etags->eccResult = eccres;
++ yaffs_unpack_tags1(etags, &pt1);
++ etags->ecc_result = eccres;
+
+ /* Set deleted state */
+- etags->chunkDeleted = deleted;
++ etags->is_deleted = deleted;
+ return YAFFS_OK;
+ }
+
+@@ -282,15 +278,15 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+-int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
++int nandmtd1_MarkNANDBlockBad(struct yaffs_dev_s *dev, int block_no)
+ {
+- struct mtd_info *mtd = dev->genericDevice;
+- int blocksize = dev->nChunksPerBlock * dev->nDataBytesPerChunk;
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int blocksize = dev->param.chunks_per_block * dev->data_bytes_per_chunk;
+ int retval;
+
+- yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad\n", blockNo);
++ T(YAFFS_TRACE_BAD_BLOCKS,(TSTR("marking block %d bad"TENDSTR), block_no));
+
+- retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo);
++ retval = mtd->block_markbad(mtd, (loff_t)blocksize * block_no);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+ }
+
+@@ -305,9 +301,9 @@ static int nandmtd1_TestPrerequists(stru
+ int oobavail = mtd->ecclayout->oobavail;
+
+ if (oobavail < YTAG1_SIZE) {
+- yaffs_trace(YAFFS_TRACE_ERROR,
+- "mtd device has only %d bytes for tags, need %d\n",
+- oobavail, YTAG1_SIZE);
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("mtd device has only %d bytes for tags, need %d"TENDSTR),
++ oobavail, YTAG1_SIZE));
+ return YAFFS_FAIL;
+ }
+ return YAFFS_OK;
+@@ -322,13 +318,13 @@ static int nandmtd1_TestPrerequists(stru
+ *
+ * Always returns YAFFS_OK.
+ */
+-int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *pState, __u32 *pSequenceNumber)
++int nandmtd1_QueryNANDBlock(struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *pState, __u32 *pSequenceNumber)
+ {
+- struct mtd_info *mtd = dev->genericDevice;
+- int chunkNo = blockNo * dev->nChunksPerBlock;
+- loff_t addr = (loff_t)chunkNo * dev->nDataBytesPerChunk;
+- yaffs_ExtendedTags etags;
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int chunkNo = block_no * dev->param.chunks_per_block;
++ loff_t addr = (loff_t)chunkNo * dev->data_bytes_per_chunk;
++ yaffs_ext_tags etags;
+ int state = YAFFS_BLOCK_STATE_DEAD;
+ int seqnum = 0;
+ int retval;
+@@ -340,17 +336,17 @@ int nandmtd1_QueryNANDBlock(struct yaffs
+ return YAFFS_FAIL;
+
+ retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
+- etags.blockBad = (mtd->block_isbad)(mtd, addr);
+- if (etags.blockBad) {
+- yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+- "block %d is marked bad\n", blockNo);
++ etags.block_bad = (mtd->block_isbad)(mtd, addr);
++ if (etags.block_bad) {
++ T(YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("block %d is marked bad"TENDSTR), block_no));
+ state = YAFFS_BLOCK_STATE_DEAD;
+- } else if (etags.eccResult != YAFFS_ECC_RESULT_NO_ERROR) {
++ } else if (etags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) {
+ /* bad tags, need to look more closely */
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+- } else if (etags.chunkUsed) {
++ } else if (etags.chunk_used) {
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+- seqnum = etags.sequenceNumber;
++ seqnum = etags.seq_number;
+ } else {
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ }
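
For context, a minimal sketch (not in this patch) of how the renamed mtdif1 hooks above are plugged into the new dev->param function pointers at mount time. The declarations are assumed to come from yaffs_mtdif1.h, and CONFIG_YAFFS_YAFFS2 is assumed so the tagged hooks exist in yaffs_param_s.

#include "yaffs_guts.h"
#include "yaffs_mtdif1.h"	/* nandmtd1_* declarations (assumed) */

static void example_attach_mtdif1(yaffs_dev_t *dev)
{
	/* MTD performs the data ECC; yaffs keeps its mini-ECC on the packed tags */
	dev->param.write_chunk_tags_fn = nandmtd1_WriteChunkWithTagsToNAND;
	dev->param.read_chunk_tags_fn = nandmtd1_ReadChunkWithTagsFromNAND;
	dev->param.bad_block_fn = nandmtd1_MarkNANDBlockBad;
	dev->param.query_block_fn = nandmtd1_QueryNANDBlock;
}
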
+--- a/fs/yaffs2/yaffs_mtdif1-compat.c
++++ /dev/null
+@@ -1,434 +0,0 @@
+-From ian@brightstareng.com Fri May 18 15:06:49 2007
+-From ian@brightstareng.com Fri May 18 15:08:21 2007
+-Received: from 206.173.66.57.ptr.us.xo.net ([206.173.66.57] helo=zebra.brightstareng.com)
+- by apollo.linkchoose.co.uk with esmtp (Exim 4.60)
+- (envelope-from <ian@brightstareng.com>)
+- id 1Hp380-00011e-T6
+- for david.goodenough@linkchoose.co.uk; Fri, 18 May 2007 15:08:21 +0100
+-Received: from localhost (localhost.localdomain [127.0.0.1])
+- by zebra.brightstareng.com (Postfix) with ESMTP
+- id 4819F28C004; Fri, 18 May 2007 10:07:49 -0400 (EDT)
+-Received: from zebra.brightstareng.com ([127.0.0.1])
+- by localhost (zebra [127.0.0.1]) (amavisd-new, port 10024) with ESMTP
+- id 05328-06; Fri, 18 May 2007 10:07:16 -0400 (EDT)
+-Received: from pippin (unknown [192.168.1.25])
+- by zebra.brightstareng.com (Postfix) with ESMTP
+- id 8BEF528C1BC; Fri, 18 May 2007 10:06:53 -0400 (EDT)
+-From: Ian McDonnell <ian@brightstareng.com>
+-To: David Goodenough <david.goodenough@linkchoose.co.uk>
+-Subject: Re: something tested this time -- yaffs_mtdif1-compat.c
+-Date: Fri, 18 May 2007 10:06:49 -0400
+-User-Agent: KMail/1.9.1
+-References: <200705142207.06909.ian@brightstareng.com> <200705171131.53536.ian@brightstareng.com> <200705181334.32166.david.goodenough@linkchoose.co.uk>
+-In-Reply-To: <200705181334.32166.david.goodenough@linkchoose.co.uk>
+-Cc: Andrea Conti <alyf@alyf.net>,
+- Charles Manning <manningc2@actrix.gen.nz>
+-MIME-Version: 1.0
+-Content-Type: Multipart/Mixed;
+- boundary="Boundary-00=_5LbTGmt62YoutxM"
+-Message-Id: <200705181006.49860.ian@brightstareng.com>
+-X-Virus-Scanned: by amavisd-new at brightstareng.com
+-Status: R
+-X-Status: NT
+-X-KMail-EncryptionState:
+-X-KMail-SignatureState:
+-X-KMail-MDN-Sent:
+-
+---Boundary-00=_5LbTGmt62YoutxM
+-Content-Type: text/plain;
+- charset="iso-8859-15"
+-Content-Transfer-Encoding: 7bit
+-Content-Disposition: inline
+-
+-David, Andrea,
+-
+-On Friday 18 May 2007 08:34, you wrote:
+-> Yea team. With this fix in place (I put it in the wrong place
+-> at first) I can now mount and ls the Yaffs partition without
+-> an error messages!
+-
+-Good news!
+-
+-Attached is a newer yaffs_mtdif1.c with a bandaid to help the
+-2.6.18 and 2.6.19 versions of MTD not trip on the oob read.
+-See the LINUX_VERSION_CODE conditional in
+-nandmtd1_ReadChunkWithTagsFromNAND.
+-
+--imcd
+-
+---Boundary-00=_5LbTGmt62YoutxM
+-Content-Type: text/x-csrc;
+- charset="iso-8859-15";
+- name="yaffs_mtdif1.c"
+-Content-Transfer-Encoding: 7bit
+-Content-Disposition: attachment;
+- filename="yaffs_mtdif1.c"
+-
+-/*
+- * YAFFS: Yet another FFS. A NAND-flash specific file system.
+- * yaffs_mtdif1.c NAND mtd interface functions for small-page NAND.
+- *
+- * Copyright (C) 2002 Aleph One Ltd.
+- * for Toby Churchill Ltd and Brightstar Engineering
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-/*
+- * This module provides the interface between yaffs_nand.c and the
+- * MTD API. This version is used when the MTD interface supports the
+- * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
+- * and we have small-page NAND device.
+- *
+- * These functions are invoked via function pointers in yaffs_nand.c.
+- * This replaces functionality provided by functions in yaffs_mtdif.c
+- * and the yaffs_TagsCompatability functions in yaffs_tagscompat.c that are
+- * called in yaffs_mtdif.c when the function pointers are NULL.
+- * We assume the MTD layer is performing ECC (useNANDECC is true).
+- */
+-
+-#include "yportenv.h"
+-#include "yaffs_guts.h"
+-#include "yaffs_packedtags1.h"
+-#include "yaffs_tagscompat.h" // for yaffs_CalcTagsECC
+-
+-#include "linux/kernel.h"
+-#include "linux/version.h"
+-#include "linux/types.h"
+-#include "linux/mtd/mtd.h"
+-
+-/* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+-
+-const char *yaffs_mtdif1_c_version = "$Id$";
+-
+-#ifndef CONFIG_YAFFS_9BYTE_TAGS
+-# define YTAG1_SIZE 8
+-#else
+-# define YTAG1_SIZE 9
+-#endif
+-
+-#if 0
+-/* Use the following nand_ecclayout with MTD when using
+- * CONFIG_YAFFS_9BYTE_TAGS and the older on-NAND tags layout.
+- * If you have existing Yaffs images and the byte order differs from this,
+- * adjust 'oobfree' to match your existing Yaffs data.
+- *
+- * This nand_ecclayout scatters/gathers to/from the old-yaffs layout with the
+- * pageStatus byte (at NAND spare offset 4) scattered/gathered from/to
+- * the 9th byte.
+- *
+- * Old-style on-NAND format: T0,T1,T2,T3,P,B,T4,T5,E0,E1,E2,T6,T7,E3,E4,E5
+- * We have/need PackedTags1 plus pageStatus: T0,T1,T2,T3,T4,T5,T6,T7,P
+- * where Tn are the tag bytes, En are MTD's ECC bytes, P is the pageStatus
+- * byte and B is the small-page bad-block indicator byte.
+- */
+-static struct nand_ecclayout nand_oob_16 = {
+- .eccbytes = 6,
+- .eccpos = { 8, 9, 10, 13, 14, 15 },
+- .oobavail = 9,
+- .oobfree = { { 0, 4 }, { 6, 2 }, { 11, 2 }, { 4, 1 } }
+-};
+-#endif
+-
+-/* Write a chunk (page) of data to NAND.
+- *
+- * Caller always provides ExtendedTags data which are converted to a more
+- * compact (packed) form for storage in NAND. A mini-ECC runs over the
+- * contents of the tags meta-data; used to validate the tags when read.
+- *
+- * - Pack ExtendedTags to PackedTags1 form
+- * - Compute mini-ECC for PackedTags1
+- * - Write data and packed tags to NAND.
+- *
+- * Note: Due to the use of the PackedTags1 meta-data which does not include
+- * a full sequence number (as found in the larger PackedTags2 form) it is
+- * necessary for Yaffs to re-write a chunk/page (just once) to mark it as
+- * discarded and dirty. This is not ideal: newer NAND parts are supposed
+- * to be written just once. When Yaffs performs this operation, this
+- * function is called with a NULL data pointer -- calling MTD write_oob
+- * without data is valid usage (2.6.17).
+- *
+- * Any underlying MTD error results in YAFFS_FAIL.
+- * Returns YAFFS_OK or YAFFS_FAIL.
+- */
+-int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND, const __u8 * data, const yaffs_ExtendedTags * etags)
+-{
+- struct mtd_info * mtd = dev->genericDevice;
+- int chunkBytes = dev->nDataBytesPerChunk;
+- loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+- struct mtd_oob_ops ops;
+- yaffs_PackedTags1 pt1;
+- int retval;
+-
+- /* we assume that PackedTags1 and yaffs_Tags are compatible */
+- compile_time_assertion(sizeof(yaffs_PackedTags1) == 12);
+- compile_time_assertion(sizeof(yaffs_Tags) == 8);
+-
+- yaffs_PackTags1(&pt1, etags);
+- yaffs_CalcTagsECC((yaffs_Tags *)&pt1);
+-
+- /* When deleting a chunk, the upper layer provides only skeletal
+- * etags, one with chunkDeleted set. However, we need to update the
+- * tags, not erase them completely. So we use the NAND write property
+- * that only zeroed-bits stick and set tag bytes to all-ones and
+- * zero just the (not) deleted bit.
+- */
+-#ifndef CONFIG_YAFFS_9BYTE_TAGS
+- if (etags->chunkDeleted) {
+- memset(&pt1, 0xff, 8);
+- /* clear delete status bit to indicate deleted */
+- pt1.deleted = 0;
+- }
+-#else
+- ((__u8 *)&pt1)[8] = 0xff;
+- if (etags->chunkDeleted) {
+- memset(&pt1, 0xff, 8);
+- /* zero pageStatus byte to indicate deleted */
+- ((__u8 *)&pt1)[8] = 0;
+- }
+-#endif
+-
+- memset(&ops, 0, sizeof(ops));
+- ops.mode = MTD_OOB_AUTO;
+- ops.len = (data) ? chunkBytes : 0;
+- ops.ooblen = YTAG1_SIZE;
+- ops.datbuf = (__u8 *)data;
+- ops.oobbuf = (__u8 *)&pt1;
+-
+- retval = mtd->write_oob(mtd, addr, &ops);
+- if (retval) {
+- yaffs_trace(YAFFS_TRACE_MTD,
+- "write_oob failed, chunk %d, mtd error %d\n",
+- chunkInNAND, retval);
+- }
+- return retval ? YAFFS_FAIL : YAFFS_OK;
+-}
+-
+-/* Return with empty ExtendedTags but add eccResult.
+- */
+-static int rettags(yaffs_ExtendedTags * etags, int eccResult, int retval)
+-{
+- if (etags) {
+- memset(etags, 0, sizeof(*etags));
+- etags->eccResult = eccResult;
+- }
+- return retval;
+-}
+-
+-/* Read a chunk (page) from NAND.
+- *
+- * Caller expects ExtendedTags data to be usable even on error; that is,
+- * all members except eccResult and blockBad are zeroed.
+- *
+- * - Check ECC results for data (if applicable)
+- * - Check for blank/erased block (return empty ExtendedTags if blank)
+- * - Check the PackedTags1 mini-ECC (correct if necessary/possible)
+- * - Convert PackedTags1 to ExtendedTags
+- * - Update eccResult and blockBad members to reflect state.
+- *
+- * Returns YAFFS_OK or YAFFS_FAIL.
+- */
+-int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev,
+- int chunkInNAND, __u8 * data, yaffs_ExtendedTags * etags)
+-{
+- struct mtd_info * mtd = dev->genericDevice;
+- int chunkBytes = dev->nDataBytesPerChunk;
+- loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+- int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+- struct mtd_oob_ops ops;
+- yaffs_PackedTags1 pt1;
+- int retval;
+- int deleted;
+-
+- memset(&ops, 0, sizeof(ops));
+- ops.mode = MTD_OOB_AUTO;
+- ops.len = (data) ? chunkBytes : 0;
+- ops.ooblen = YTAG1_SIZE;
+- ops.datbuf = data;
+- ops.oobbuf = (__u8 *)&pt1;
+-
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+- /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
+- * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
+- */
+- ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
+-#endif
+- /* Read page and oob using MTD.
+- * Check status and determine ECC result.
+- */
+- retval = mtd->read_oob(mtd, addr, &ops);
+- if (retval) {
+- yaffs_trace(YAFFS_TRACE_MTD,
+- "read_oob failed, chunk %d, mtd error %d\n",
+- chunkInNAND, retval);
+- }
+-
+- switch (retval) {
+- case 0:
+- /* no error */
+- break;
+-
+- case -EUCLEAN:
+- /* MTD's ECC fixed the data */
+- eccres = YAFFS_ECC_RESULT_FIXED;
+- dev->eccFixed++;
+- break;
+-
+- case -EBADMSG:
+- /* MTD's ECC could not fix the data */
+- dev->eccUnfixed++;
+- /* fall into... */
+- default:
+- rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
+- etags->blockBad = (mtd->block_isbad)(mtd, addr);
+- return YAFFS_FAIL;
+- }
+-
+- /* Check for a blank/erased chunk.
+- */
+- if (yaffs_CheckFF((__u8 *)&pt1, 8)) {
+- /* when blank, upper layers want eccResult to be <= NO_ERROR */
+- return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
+- }
+-
+-#ifndef CONFIG_YAFFS_9BYTE_TAGS
+- /* Read deleted status (bit) then return it to its non-deleted
+- * state before performing tags mini-ECC check. pt1.deleted is
+- * inverted.
+- */
+- deleted = !pt1.deleted;
+- pt1.deleted = 1;
+-#else
+- (void) deleted; /* not used */
+-#endif
+-
+- /* Check the packed tags mini-ECC and correct if necessary/possible.
+- */
+- retval = yaffs_CheckECCOnTags((yaffs_Tags *)&pt1);
+- switch (retval) {
+- case 0:
+- /* no tags error, use MTD result */
+- break;
+- case 1:
+- /* recovered tags-ECC error */
+- dev->tagsEccFixed++;
+- eccres = YAFFS_ECC_RESULT_FIXED;
+- break;
+- default:
+- /* unrecovered tags-ECC error */
+- dev->tagsEccUnfixed++;
+- return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
+- }
+-
+- /* Unpack the tags to extended form and set ECC result.
+- * [set shouldBeFF just to keep yaffs_UnpackTags1 happy]
+- */
+- pt1.shouldBeFF = 0xFFFFFFFF;
+- yaffs_UnpackTags1(etags, &pt1);
+- etags->eccResult = eccres;
+-
+- /* Set deleted state.
+- */
+-#ifndef CONFIG_YAFFS_9BYTE_TAGS
+- etags->chunkDeleted = deleted;
+-#else
+- etags->chunkDeleted = (yaffs_CountBits(((__u8 *)&pt1)[8]) < 7);
+-#endif
+- return YAFFS_OK;
+-}
+-
+-/* Mark a block bad.
+- *
+- * This is a persistent state.
+- * Use of this function should be rare.
+- *
+- * Returns YAFFS_OK or YAFFS_FAIL.
+- */
+-int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
+-{
+- struct mtd_info * mtd = dev->genericDevice;
+- int blocksize = dev->nChunksPerBlock * dev->nDataBytesPerChunk;
+- int retval;
+-
+- yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", blockNo);
+-
+- retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo);
+- return (retval) ? YAFFS_FAIL : YAFFS_OK;
+-}
+-
+-/* Check any MTD prerequisites.
+- *
+- * Returns YAFFS_OK or YAFFS_FAIL.
+- */
+-static int nandmtd1_TestPrerequists(struct mtd_info * mtd)
+-{
+- /* 2.6.18 has mtd->ecclayout->oobavail */
+- /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
+- int oobavail = mtd->ecclayout->oobavail;
+-
+- if (oobavail < YTAG1_SIZE) {
+- yaffs_trace(YAFFS_TRACE_ERROR,
+- "mtd device has only %d bytes for tags, need %d",
+- oobavail, YTAG1_SIZE);
+- return YAFFS_FAIL;
+- }
+- return YAFFS_OK;
+-}
+-
+-/* Query for the current state of a specific block.
+- *
+- * Examine the tags of the first chunk of the block and return the state:
+- * - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
+- * - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
+- * - YAFFS_BLOCK_STATE_EMPTY, the block is clean
+- *
+- * Always returns YAFFS_OK.
+- */
+-int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * pState, int *pSequenceNumber)
+-{
+- struct mtd_info * mtd = dev->genericDevice;
+- int chunkNo = blockNo * dev->nChunksPerBlock;
+- yaffs_ExtendedTags etags;
+- int state = YAFFS_BLOCK_STATE_DEAD;
+- int seqnum = 0;
+- int retval;
+-
+- /* We don't yet have a good place to test for MTD config prerequisites.
+- * Do it here as we are called during the initial scan.
+- */
+- if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK) {
+- return YAFFS_FAIL;
+- }
+-
+- retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
+- if (etags.blockBad) {
+- yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+- "block %d is marked bad", blockNo);
+- state = YAFFS_BLOCK_STATE_DEAD;
+- }
+- else if (etags.chunkUsed) {
+- state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+- seqnum = etags.sequenceNumber;
+- }
+- else {
+- state = YAFFS_BLOCK_STATE_EMPTY;
+- }
+-
+- *pState = state;
+- *pSequenceNumber = seqnum;
+-
+- /* query always succeeds */
+- return YAFFS_OK;
+-}
+-
+-#endif /*KERNEL_VERSION*/
+-
+---Boundary-00=_5LbTGmt62YoutxM--
+-
+-
+-
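The removed compat file documents the same small-page deletion trick the live yaffs_mtdif1.c keeps: NAND programming can only clear bits, so a chunk is marked deleted by rewriting its tag bytes as all 0xFF with only the deleted/pageStatus bit cleared, leaving the previously written tag data untouched. A minimal user-space model of that bit-level behaviour (plain C; the buffer and bit position are illustrative, not the real yaffs_PackedTags1 layout):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy model of NAND programming: bits can only go from 1 to 0. */
static void nand_program(uint8_t *cell, const uint8_t *data, size_t n)
{
        size_t i;

        for (i = 0; i < n; i++)
                cell[i] &= data[i];             /* only zeroed bits stick */
}

int main(void)
{
        uint8_t oob[8];
        uint8_t tags[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf1 };
        uint8_t delete_pattern[8];

        memset(oob, 0xff, sizeof(oob));          /* erased spare area */
        nand_program(oob, tags, sizeof(tags));   /* first write: real tags */

        /* Second write: all 0xFF except the deletion flag, mirroring the
         * memset(&pt1, 0xff, 8); pt1.deleted = 0; pattern in the yaffs code.
         * Bit 0 of byte 7 is an assumed position, not the real bitfield slot. */
        memset(delete_pattern, 0xff, sizeof(delete_pattern));
        delete_pattern[7] &= (uint8_t)~0x01;
        nand_program(oob, delete_pattern, sizeof(delete_pattern));

        assert(oob[0] == 0x12);                  /* tag data untouched */
        assert((oob[7] & 0x01) == 0);            /* deletion bit now clear */
        printf("tags survive, deletion bit cleared: oob[7] = 0x%02x\n", oob[7]);
        return 0;
}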
+--- a/fs/yaffs2/yaffs_mtdif1.h
++++ b/fs/yaffs2/yaffs_mtdif1.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -14,15 +14,15 @@
+ #ifndef __YAFFS_MTDIF1_H__
+ #define __YAFFS_MTDIF1_H__
+
+-int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
+- const __u8 *data, const yaffs_ExtendedTags *tags);
++int nandmtd1_WriteChunkWithTagsToNAND(yaffs_dev_t *dev, int nand_chunk,
++ const __u8 *data, const yaffs_ext_tags *tags);
+
+-int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
+- __u8 *data, yaffs_ExtendedTags *tags);
++int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_dev_t *dev, int nand_chunk,
++ __u8 *data, yaffs_ext_tags *tags);
+
+-int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
++int nandmtd1_MarkNANDBlockBad(struct yaffs_dev_s *dev, int block_no);
+
+-int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *state, __u32 *sequenceNumber);
++int nandmtd1_QueryNANDBlock(struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *state, __u32 *seq_number);
+
+ #endif
+--- a/fs/yaffs2/yaffs_mtdif2.c
++++ b/fs/yaffs2/yaffs_mtdif2.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -13,11 +13,8 @@
+
+ /* mtd interface for YAFFS2 */
+
+-const char *yaffs_mtdif2_c_version =
+- "$Id: yaffs_mtdif2.c,v 1.23 2009-03-06 17:20:53 wookey Exp $";
+-
+ #include "yportenv.h"
+-
++#include "yaffs_trace.h"
+
+ #include "yaffs_mtdif2.h"
+
+@@ -27,15 +24,17 @@ const char *yaffs_mtdif2_c_version =
+
+ #include "yaffs_packedtags2.h"
+
++#include "yaffs_linux.h"
++
+ /* NB For use with inband tags....
+ * We assume that the data buffer is of size totalBytesPerChunk so that we can also
+ * use it to load the tags.
+ */
+-int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_dev_t *dev, int nand_chunk,
+ const __u8 *data,
+- const yaffs_ExtendedTags *tags)
++ const yaffs_ext_tags *tags)
+ {
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ #if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #else
+@@ -47,13 +46,16 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
+
+ yaffs_PackedTags2 pt;
+
++ int packed_tags_size = dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
++ void * packed_tags_ptr = dev->param.no_tags_ecc ? (void *) &pt.t : (void *)&pt;
++
+ T(YAFFS_TRACE_MTD,
+ (TSTR
+ ("nandmtd2_WriteChunkWithTagsToNAND chunk %d data %p tags %p"
+- TENDSTR), chunkInNAND, data, tags));
++ TENDSTR), nand_chunk, data, tags));
+
+
+- addr = ((loff_t) chunkInNAND) * dev->totalBytesPerChunk;
++ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+ /* For yaffs2 writing there must be both data and tags.
+ * If we're using inband tags, then the tags are stuffed into
+@@ -61,30 +63,30 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
+ */
+ if (!data || !tags)
+ BUG();
+- else if (dev->inbandTags) {
++ else if (dev->param.inband_tags) {
+ yaffs_PackedTags2TagsPart *pt2tp;
+- pt2tp = (yaffs_PackedTags2TagsPart *)(data + dev->nDataBytesPerChunk);
++ pt2tp = (yaffs_PackedTags2TagsPart *)(data + dev->data_bytes_per_chunk);
+ yaffs_PackTags2TagsPart(pt2tp, tags);
+ } else
+- yaffs_PackTags2(&pt, tags);
++ yaffs_PackTags2(&pt, tags, !dev->param.no_tags_ecc);
+
+ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ ops.mode = MTD_OOB_AUTO;
+- ops.ooblen = (dev->inbandTags) ? 0 : sizeof(pt);
+- ops.len = dev->totalBytesPerChunk;
++ ops.ooblen = (dev->param.inband_tags) ? 0 : packed_tags_size;
++ ops.len = dev->param.total_bytes_per_chunk;
+ ops.ooboffs = 0;
+ ops.datbuf = (__u8 *)data;
+- ops.oobbuf = (dev->inbandTags) ? NULL : (void *)&pt;
++ ops.oobbuf = (dev->param.inband_tags) ? NULL : packed_tags_ptr;
+ retval = mtd->write_oob(mtd, addr, &ops);
+
+ #else
+- if (!dev->inbandTags) {
++ if (!dev->param.inband_tags) {
+ retval =
+- mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, (__u8 *) &pt, NULL);
++ mtd->write_ecc(mtd, addr, dev->data_bytes_per_chunk,
++ &dummy, data, (__u8 *) packed_tags_ptr, NULL);
+ } else {
+ retval =
+- mtd->write(mtd, addr, dev->totalBytesPerChunk, &dummy,
++ mtd->write(mtd, addr, dev->param.total_bytes_per_chunk, &dummy,
+ data);
+ }
+ #endif
+@@ -95,10 +97,10 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
+- __u8 *data, yaffs_ExtendedTags *tags)
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_dev_t *dev, int nand_chunk,
++ __u8 *data, yaffs_ext_tags *tags)
+ {
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ #if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #endif
+@@ -106,20 +108,23 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
+ int retval = 0;
+ int localData = 0;
+
+- loff_t addr = ((loff_t) chunkInNAND) * dev->totalBytesPerChunk;
++ loff_t addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+ yaffs_PackedTags2 pt;
+
++ int packed_tags_size = dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
++ void * packed_tags_ptr = dev->param.no_tags_ecc ? (void *) &pt.t: (void *)&pt;
++
+ T(YAFFS_TRACE_MTD,
+ (TSTR
+ ("nandmtd2_ReadChunkWithTagsFromNAND chunk %d data %p tags %p"
+- TENDSTR), chunkInNAND, data, tags));
++ TENDSTR), nand_chunk, data, tags));
+
+- if (dev->inbandTags) {
++ if (dev->param.inband_tags) {
+
+ if (!data) {
+ localData = 1;
+- data = yaffs_GetTempBuffer(dev, __LINE__);
++ data = yaffs_get_temp_buffer(dev, __LINE__);
+ }
+
+
+@@ -127,30 +132,30 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
+
+
+ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+- if (dev->inbandTags || (data && !tags))
+- retval = mtd->read(mtd, addr, dev->totalBytesPerChunk,
++ if (dev->param.inband_tags || (data && !tags))
++ retval = mtd->read(mtd, addr, dev->param.total_bytes_per_chunk,
+ &dummy, data);
+ else if (tags) {
+ ops.mode = MTD_OOB_AUTO;
+- ops.ooblen = sizeof(pt);
+- ops.len = data ? dev->nDataBytesPerChunk : sizeof(pt);
++ ops.ooblen = packed_tags_size;
++ ops.len = data ? dev->data_bytes_per_chunk : packed_tags_size;
+ ops.ooboffs = 0;
+ ops.datbuf = data;
+- ops.oobbuf = dev->spareBuffer;
++ ops.oobbuf = yaffs_dev_to_lc(dev)->spareBuffer;
+ retval = mtd->read_oob(mtd, addr, &ops);
+ }
+ #else
+- if (!dev->inbandTags && data && tags) {
++ if (!dev->param.inband_tags && data && tags) {
+
+- retval = mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
++ retval = mtd->read_ecc(mtd, addr, dev->data_bytes_per_chunk,
+ &dummy, data, dev->spareBuffer,
+ NULL);
+ } else {
+ if (data)
+ retval =
+- mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
++ mtd->read(mtd, addr, dev->data_bytes_per_chunk, &dummy,
+ data);
+- if (!dev->inbandTags && tags)
++ if (!dev->param.inband_tags && tags)
+ retval =
+ mtd->read_oob(mtd, addr, mtd->oobsize, &dummy,
+ dev->spareBuffer);
+@@ -158,41 +163,47 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
+ #endif
+
+
+- if (dev->inbandTags) {
++ if (dev->param.inband_tags) {
+ if (tags) {
+ yaffs_PackedTags2TagsPart *pt2tp;
+- pt2tp = (yaffs_PackedTags2TagsPart *)&data[dev->nDataBytesPerChunk];
+- yaffs_UnpackTags2TagsPart(tags, pt2tp);
++ pt2tp = (yaffs_PackedTags2TagsPart *)&data[dev->data_bytes_per_chunk];
++ yaffs_unpack_tags2tags_part(tags, pt2tp);
+ }
+ } else {
+ if (tags) {
+- memcpy(&pt, dev->spareBuffer, sizeof(pt));
+- yaffs_UnpackTags2(tags, &pt);
++ memcpy(packed_tags_ptr, yaffs_dev_to_lc(dev)->spareBuffer, packed_tags_size);
++ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+ }
+ }
+
+ if (localData)
+- yaffs_ReleaseTempBuffer(dev, data, __LINE__);
++ yaffs_release_temp_buffer(dev, data, __LINE__);
+
+- if (tags && retval == -EBADMSG && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR)
+- tags->eccResult = YAFFS_ECC_RESULT_UNFIXED;
++ if (tags && retval == -EBADMSG && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
++ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ dev->n_ecc_unfixed++;
++ }
++ if(tags && retval == -EUCLEAN && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
++ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
++ dev->n_ecc_fixed++;
++ }
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
++int nandmtd2_MarkNANDBlockBad(struct yaffs_dev_s *dev, int block_no)
+ {
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int retval;
+ T(YAFFS_TRACE_MTD,
+- (TSTR("nandmtd2_MarkNANDBlockBad %d" TENDSTR), blockNo));
++ (TSTR("nandmtd2_MarkNANDBlockBad %d" TENDSTR), block_no));
+
+ retval =
+ mtd->block_markbad(mtd,
+- blockNo * dev->nChunksPerBlock *
+- dev->totalBytesPerChunk);
++ block_no * dev->param.chunks_per_block *
++ dev->param.total_bytes_per_chunk);
+
+ if (retval == 0)
+ return YAFFS_OK;
+@@ -201,41 +212,41 @@ int nandmtd2_MarkNANDBlockBad(struct yaf
+
+ }
+
+-int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *state, __u32 *sequenceNumber)
++int nandmtd2_QueryNANDBlock(struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *state, __u32 *seq_number)
+ {
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int retval;
+
+ T(YAFFS_TRACE_MTD,
+- (TSTR("nandmtd2_QueryNANDBlock %d" TENDSTR), blockNo));
++ (TSTR("nandmtd2_QueryNANDBlock %d" TENDSTR), block_no));
+ retval =
+ mtd->block_isbad(mtd,
+- blockNo * dev->nChunksPerBlock *
+- dev->totalBytesPerChunk);
++ block_no * dev->param.chunks_per_block *
++ dev->param.total_bytes_per_chunk);
+
+ if (retval) {
+ T(YAFFS_TRACE_MTD, (TSTR("block is bad" TENDSTR)));
+
+ *state = YAFFS_BLOCK_STATE_DEAD;
+- *sequenceNumber = 0;
++ *seq_number = 0;
+ } else {
+- yaffs_ExtendedTags t;
++ yaffs_ext_tags t;
+ nandmtd2_ReadChunkWithTagsFromNAND(dev,
+- blockNo *
+- dev->nChunksPerBlock, NULL,
++ block_no *
++ dev->param.chunks_per_block, NULL,
+ &t);
+
+- if (t.chunkUsed) {
+- *sequenceNumber = t.sequenceNumber;
++ if (t.chunk_used) {
++ *seq_number = t.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ } else {
+- *sequenceNumber = 0;
++ *seq_number = 0;
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+ }
+ T(YAFFS_TRACE_MTD,
+- (TSTR("block is bad seq %d state %d" TENDSTR), *sequenceNumber,
++ (TSTR("block is bad seq %d state %d" TENDSTR), *seq_number,
+ *state));
+
+ if (retval == 0)
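With dev->param.no_tags_ecc the mtdif2 read/write paths above put either the full yaffs_PackedTags2 (tags part plus tags ECC) or just its leading tags part into the OOB area, selected through packed_tags_size and packed_tags_ptr. A standalone sketch of that selection, using stand-in structs loosely modelled on yaffs_packedtags2.h rather than the real definitions:

#include <stdio.h>

/* Stand-ins loosely modelled on yaffs_PackedTags2TagsPart / yaffs_PackedTags2. */
struct tags_part {
        unsigned seq_number;
        unsigned obj_id;
        unsigned chunk_id;
        unsigned n_bytes;
};

struct tags_ecc {                       /* placeholder for the tags-ECC member */
        unsigned col_parity;
        unsigned line_parity;
        unsigned line_parity_prime;
};

struct packed_tags2 {
        struct tags_part t;
        struct tags_ecc ecc;
};

int main(void)
{
        struct packed_tags2 pt;
        int no_tags_ecc;

        for (no_tags_ecc = 0; no_tags_ecc <= 1; no_tags_ecc++) {
                /* Same selection the new mtdif2 read/write paths make. */
                size_t packed_tags_size =
                        no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
                void *packed_tags_ptr =
                        no_tags_ecc ? (void *)&pt.t : (void *)&pt;

                printf("no_tags_ecc=%d -> %zu OOB bytes (%s), buffer at %p\n",
                       no_tags_ecc, packed_tags_size,
                       no_tags_ecc ? "tags part only" : "tags part + tags ECC",
                       packed_tags_ptr);
        }
        return 0;
}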
+--- a/fs/yaffs2/yaffs_mtdif2.h
++++ b/fs/yaffs2/yaffs_mtdif2.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -17,13 +17,13 @@
+ #define __YAFFS_MTDIF2_H__
+
+ #include "yaffs_guts.h"
+-int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_dev_t *dev, int nand_chunk,
+ const __u8 *data,
+- const yaffs_ExtendedTags *tags);
+-int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
+- __u8 *data, yaffs_ExtendedTags *tags);
+-int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+-int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *state, __u32 *sequenceNumber);
++ const yaffs_ext_tags *tags);
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_dev_t *dev, int nand_chunk,
++ __u8 *data, yaffs_ext_tags *tags);
++int nandmtd2_MarkNANDBlockBad(struct yaffs_dev_s *dev, int block_no);
++int nandmtd2_QueryNANDBlock(struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *state, __u32 *seq_number);
+
+ #endif
+--- a/fs/yaffs2/yaffs_mtdif.c
++++ b/fs/yaffs2/yaffs_mtdif.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -11,9 +11,6 @@
+ * published by the Free Software Foundation.
+ */
+
+-const char *yaffs_mtdif_c_version =
+- "$Id: yaffs_mtdif.c,v 1.22 2009-03-06 17:20:51 wookey Exp $";
+-
+ #include "yportenv.h"
+
+
+@@ -24,208 +21,26 @@ const char *yaffs_mtdif_c_version =
+ #include "linux/time.h"
+ #include "linux/mtd/nand.h"
+
+-#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 18))
+-static struct nand_oobinfo yaffs_oobinfo = {
+- .useecc = 1,
+- .eccbytes = 6,
+- .eccpos = {8, 9, 10, 13, 14, 15}
+-};
+-
+-static struct nand_oobinfo yaffs_noeccinfo = {
+- .useecc = 0,
+-};
+-#endif
+-
+-#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+-static inline void translate_spare2oob(const yaffs_Spare *spare, __u8 *oob)
+-{
+- oob[0] = spare->tagByte0;
+- oob[1] = spare->tagByte1;
+- oob[2] = spare->tagByte2;
+- oob[3] = spare->tagByte3;
+- oob[4] = spare->tagByte4;
+- oob[5] = spare->tagByte5 & 0x3f;
+- oob[5] |= spare->blockStatus == 'Y' ? 0 : 0x80;
+- oob[5] |= spare->pageStatus == 0 ? 0 : 0x40;
+- oob[6] = spare->tagByte6;
+- oob[7] = spare->tagByte7;
+-}
+-
+-static inline void translate_oob2spare(yaffs_Spare *spare, __u8 *oob)
+-{
+- struct yaffs_NANDSpare *nspare = (struct yaffs_NANDSpare *)spare;
+- spare->tagByte0 = oob[0];
+- spare->tagByte1 = oob[1];
+- spare->tagByte2 = oob[2];
+- spare->tagByte3 = oob[3];
+- spare->tagByte4 = oob[4];
+- spare->tagByte5 = oob[5] == 0xff ? 0xff : oob[5] & 0x3f;
+- spare->blockStatus = oob[5] & 0x80 ? 0xff : 'Y';
+- spare->pageStatus = oob[5] & 0x40 ? 0xff : 0;
+- spare->ecc1[0] = spare->ecc1[1] = spare->ecc1[2] = 0xff;
+- spare->tagByte6 = oob[6];
+- spare->tagByte7 = oob[7];
+- spare->ecc2[0] = spare->ecc2[1] = spare->ecc2[2] = 0xff;
+-
+- nspare->eccres1 = nspare->eccres2 = 0; /* FIXME */
+-}
+-#endif
+-
+-int nandmtd_WriteChunkToNAND(yaffs_Device *dev, int chunkInNAND,
+- const __u8 *data, const yaffs_Spare *spare)
+-{
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+- struct mtd_oob_ops ops;
+-#endif
+- size_t dummy;
+- int retval = 0;
+-
+- loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+-#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+- __u8 spareAsBytes[8]; /* OOB */
+-
+- if (data && !spare)
+- retval = mtd->write(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data);
+- else if (spare) {
+- if (dev->useNANDECC) {
+- translate_spare2oob(spare, spareAsBytes);
+- ops.mode = MTD_OOB_AUTO;
+- ops.ooblen = 8; /* temp hack */
+- } else {
+- ops.mode = MTD_OOB_RAW;
+- ops.ooblen = YAFFS_BYTES_PER_SPARE;
+- }
+- ops.len = data ? dev->nDataBytesPerChunk : ops.ooblen;
+- ops.datbuf = (u8 *)data;
+- ops.ooboffs = 0;
+- ops.oobbuf = spareAsBytes;
+- retval = mtd->write_oob(mtd, addr, &ops);
+- }
+-#else
+- __u8 *spareAsBytes = (__u8 *) spare;
+-
+- if (data && spare) {
+- if (dev->useNANDECC)
+- retval =
+- mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, spareAsBytes,
+- &yaffs_oobinfo);
+- else
+- retval =
+- mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, spareAsBytes,
+- &yaffs_noeccinfo);
+- } else {
+- if (data)
+- retval =
+- mtd->write(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+- data);
+- if (spare)
+- retval =
+- mtd->write_oob(mtd, addr, YAFFS_BYTES_PER_SPARE,
+- &dummy, spareAsBytes);
+- }
+-#endif
+-
+- if (retval == 0)
+- return YAFFS_OK;
+- else
+- return YAFFS_FAIL;
+-}
+-
+-int nandmtd_ReadChunkFromNAND(yaffs_Device *dev, int chunkInNAND, __u8 *data,
+- yaffs_Spare *spare)
+-{
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+- struct mtd_oob_ops ops;
+-#endif
+- size_t dummy;
+- int retval = 0;
++#include "yaffs_linux.h"
+
+- loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+-#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+- __u8 spareAsBytes[8]; /* OOB */
+-
+- if (data && !spare)
+- retval = mtd->read(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data);
+- else if (spare) {
+- if (dev->useNANDECC) {
+- ops.mode = MTD_OOB_AUTO;
+- ops.ooblen = 8; /* temp hack */
+- } else {
+- ops.mode = MTD_OOB_RAW;
+- ops.ooblen = YAFFS_BYTES_PER_SPARE;
+- }
+- ops.len = data ? dev->nDataBytesPerChunk : ops.ooblen;
+- ops.datbuf = data;
+- ops.ooboffs = 0;
+- ops.oobbuf = spareAsBytes;
+- retval = mtd->read_oob(mtd, addr, &ops);
+- if (dev->useNANDECC)
+- translate_oob2spare(spare, spareAsBytes);
+- }
+-#else
+- __u8 *spareAsBytes = (__u8 *) spare;
+-
+- if (data && spare) {
+- if (dev->useNANDECC) {
+- /* Careful, this call adds 2 ints */
+- /* to the end of the spare data. Calling function */
+- /* should allocate enough memory for spare, */
+- /* i.e. [YAFFS_BYTES_PER_SPARE+2*sizeof(int)]. */
+- retval =
+- mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, spareAsBytes,
+- &yaffs_oobinfo);
+- } else {
+- retval =
+- mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, spareAsBytes,
+- &yaffs_noeccinfo);
+- }
+- } else {
+- if (data)
+- retval =
+- mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+- data);
+- if (spare)
+- retval =
+- mtd->read_oob(mtd, addr, YAFFS_BYTES_PER_SPARE,
+- &dummy, spareAsBytes);
+- }
+-#endif
+-
+- if (retval == 0)
+- return YAFFS_OK;
+- else
+- return YAFFS_FAIL;
+-}
+-
+-int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber)
++int nandmtd_EraseBlockInNAND(yaffs_dev_t *dev, int blockNumber)
+ {
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ __u32 addr =
+- ((loff_t) blockNumber) * dev->nDataBytesPerChunk
+- * dev->nChunksPerBlock;
++ ((loff_t) blockNumber) * dev->param.total_bytes_per_chunk
++ * dev->param.chunks_per_block;
+ struct erase_info ei;
++
+ int retval = 0;
+
+ ei.mtd = mtd;
+ ei.addr = addr;
+- ei.len = dev->nDataBytesPerChunk * dev->nChunksPerBlock;
++ ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
+ ei.time = 1000;
+ ei.retries = 2;
+ ei.callback = NULL;
+ ei.priv = (u_long) dev;
+
+- /* Todo finish off the ei if required */
+-
+- sema_init(&dev->sem, 0);
+-
+ retval = mtd->erase(mtd, &ei);
+
+ if (retval == 0)
+@@ -234,7 +49,7 @@ int nandmtd_EraseBlockInNAND(yaffs_Devic
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd_InitialiseNAND(yaffs_Device *dev)
++int nandmtd_InitialiseNAND(yaffs_dev_t *dev)
+ {
+ return YAFFS_OK;
+ }
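nandmtd_EraseBlockInNAND now derives the byte address from dev->param geometry: block number times total bytes per chunk times chunks per block, cast through loff_t so the multiplication happens in 64 bits; note the result is still assigned to a __u32 local, so offsets past 4 GiB would wrap. A standalone check of that arithmetic with illustrative geometry (the numbers are examples, not values from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative geometry only; not values taken from the patch. */
        uint32_t total_bytes_per_chunk = 2048;
        uint32_t chunks_per_block = 64;
        uint32_t block = 40000;

        /* 64-bit version of the address computed in nandmtd_EraseBlockInNAND(). */
        uint64_t addr = (uint64_t)block * total_bytes_per_chunk * chunks_per_block;

        /* What survives an assignment to a 32-bit variable, as in the patch. */
        uint32_t addr32 = (uint32_t)addr;

        printf("erase offset: %llu bytes; stored in 32 bits: %u\n",
               (unsigned long long)addr, addr32);
        return 0;
}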
+--- a/fs/yaffs2/yaffs_mtdif.h
++++ b/fs/yaffs2/yaffs_mtdif.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -22,11 +22,6 @@
+ extern struct nand_oobinfo yaffs_oobinfo;
+ extern struct nand_oobinfo yaffs_noeccinfo;
+ #endif
+-
+-int nandmtd_WriteChunkToNAND(yaffs_Device *dev, int chunkInNAND,
+- const __u8 *data, const yaffs_Spare *spare);
+-int nandmtd_ReadChunkFromNAND(yaffs_Device *dev, int chunkInNAND, __u8 *data,
+- yaffs_Spare *spare);
+-int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber);
+-int nandmtd_InitialiseNAND(yaffs_Device *dev);
++int nandmtd_EraseBlockInNAND(yaffs_dev_t *dev, int blockNumber);
++int nandmtd_InitialiseNAND(yaffs_dev_t *dev);
+ #endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_nameval.c
+@@ -0,0 +1,197 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This simple implementation of a name-value store assumes a small number of values and fits
++ * into a small finite buffer.
++ *
++ * Each attribute is stored as a record:
++ * sizeof(int) bytes record size.
++ * strnlen+1 bytes name null terminated.
++ * nbytes value.
++ * ----------
++ * total size stored in record size
++ *
++ * This code has not been tested with unicode yet.
++ */
++
++
++#include "yaffs_nameval.h"
++
++#include "yportenv.h"
++
++static int nval_find(const char *xb, int xb_size, const YCHAR *name,
++ int *exist_size)
++{
++ int pos=0;
++ int size;
++
++ memcpy(&size,xb,sizeof(int));
++ while(size > 0 && (size < xb_size) && (pos + size < xb_size)){
++ if(yaffs_strncmp((YCHAR *)(xb+pos+sizeof(int)),name,size) == 0){
++ if(exist_size)
++ *exist_size = size;
++ return pos;
++ }
++ pos += size;
++ if(pos < xb_size -sizeof(int))
++ memcpy(&size,xb + pos,sizeof(int));
++ else
++ size = 0;
++ }
++ if(exist_size)
++ *exist_size = 0;
++ return -1;
++}
++
++static int nval_used(const char *xb, int xb_size)
++{
++ int pos=0;
++ int size;
++
++ memcpy(&size,xb + pos,sizeof(int));
++ while(size > 0 && (size < xb_size) && (pos + size < xb_size)){
++ pos += size;
++ if(pos < xb_size -sizeof(int))
++ memcpy(&size,xb + pos,sizeof(int));
++ else
++ size = 0;
++ }
++ return pos;
++}
++
++int nval_del(char *xb, int xb_size, const YCHAR *name)
++{
++ int pos = nval_find(xb, xb_size, name, NULL);
++ int size;
++
++ if(pos >= 0 && pos < xb_size){
++ /* Find size, shift rest over this record, then zero out the rest of buffer */
++ memcpy(&size,xb+pos,sizeof(int));
++ memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
++ memset(xb + (xb_size - size),0,size);
++ return 0;
++ } else
++ return -ENODATA;
++}
++
++int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf, int bsize, int flags)
++{
++ int pos;
++ int namelen = yaffs_strnlen(name,xb_size);
++ int reclen;
++ int size_exist = 0;
++ int space;
++ int start;
++
++ pos = nval_find(xb,xb_size,name, &size_exist);
++
++ if(flags & XATTR_CREATE && pos >= 0)
++ return -EEXIST;
++ if(flags & XATTR_REPLACE && pos < 0)
++ return -ENODATA;
++
++ start = nval_used(xb,xb_size);
++ space = xb_size - start + size_exist;
++
++ reclen = (sizeof(int) + namelen + 1 + bsize);
++
++ if(reclen > space)
++ return -ENOSPC;
++
++ if(pos >= 0){
++ nval_del(xb,xb_size,name);
++ start = nval_used(xb, xb_size);
++ }
++
++ pos = start;
++
++ memcpy(xb + pos,&reclen,sizeof(int));
++ pos +=sizeof(int);
++ yaffs_strncpy((YCHAR *)(xb + pos), name, reclen);
++ pos+= (namelen+1);
++ memcpy(xb + pos,buf,bsize);
++ return 0;
++}
++
++int nval_get(const char *xb, int xb_size, const YCHAR *name, char *buf, int bsize)
++{
++ int pos = nval_find(xb,xb_size,name,NULL);
++ int size;
++
++ if(pos >= 0 && pos< xb_size){
++
++ memcpy(&size,xb +pos,sizeof(int));
++ pos+=sizeof(int); /* advance past record length */
++ size -= sizeof(int);
++
++ /* Advance over name string */
++ while(xb[pos] && size > 0 && pos < xb_size){
++ pos++;
++ size--;
++ }
++ /*Advance over NUL */
++ pos++;
++ size--;
++
++ if(size <= bsize){
++ memcpy(buf,xb + pos,size);
++ return size;
++ }
++
++ }
++ if(pos >= 0)
++ return -ERANGE;
++ else
++ return -ENODATA;
++}
++
++int nval_list(const char *xb, int xb_size, char *buf, int bsize)
++{
++ int pos = 0;
++ int size;
++ int name_len;
++ int ncopied = 0;
++ int filled = 0;
++
++ memcpy(&size,xb + pos,sizeof(int));
++ while(size > sizeof(int) && size <= xb_size && (pos + size) < xb_size && !filled){
++ pos+= sizeof(int);
++ size-=sizeof(int);
++ name_len = yaffs_strnlen((YCHAR *)(xb + pos), size);
++ if(ncopied + name_len + 1 < bsize){
++ memcpy(buf,xb+pos,name_len * sizeof(YCHAR));
++ buf+= name_len;
++ *buf = '\0';
++ buf++;
++ if(sizeof(YCHAR) > 1){
++ *buf = '\0';
++ buf++;
++ }
++ ncopied += (name_len+1);
++ } else
++ filled = 1;
++ pos+=size;
++ if(pos < xb_size -sizeof(int))
++ memcpy(&size,xb + pos,sizeof(int));
++ else
++ size = 0;
++ }
++ return ncopied;
++}
++
++
++int nval_hasvalues(const char *xb, int xb_size)
++{
++ return nval_used(xb, xb_size) > 0;
++}
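nval_set() and nval_get() treat the xattr buffer as a sequence of length-prefixed records: an int holding the whole record size, the NUL-terminated attribute name, then the value bytes, with the unused tail of the buffer kept zeroed. A standalone sketch that builds one such record and walks it back the same way (single-byte YCHAR assumed, as in the non-unicode build):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char xb[64];                            /* xattr buffer, normally zeroed */
        const char *name = "user.tag";
        const char value[] = { 0x01, 0x02, 0x03 };
        int namelen = (int)strlen(name);
        int bsize = (int)sizeof(value);

        /* Record layout produced by nval_set():
         *   [ int reclen ][ name + NUL ][ value bytes ]        */
        int reclen = (int)sizeof(int) + namelen + 1 + bsize;
        int pos = 0;

        memset(xb, 0, sizeof(xb));
        memcpy(xb + pos, &reclen, sizeof(int));  pos += sizeof(int);
        memcpy(xb + pos, name, namelen + 1);     pos += namelen + 1;
        memcpy(xb + pos, value, bsize);

        /* Read it back the way nval_get() walks a record. */
        int size;
        pos = 0;
        memcpy(&size, xb + pos, sizeof(int));
        pos += sizeof(int);
        size -= sizeof(int);
        printf("record size %d, name \"%s\", %d value bytes\n",
               reclen, xb + pos, size - (namelen + 1));
        return 0;
}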
+--- /dev/null
++++ b/fs/yaffs2/yaffs_nameval.h
+@@ -0,0 +1,25 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++#ifndef __NAMEVAL_H__
++#define __NAMEVAL_H__
++
++#include "yportenv.h"
++
++int nval_del(char *xb, int xb_size, const YCHAR *name);
++int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf, int bsize, int flags);
++int nval_get(const char *xb, int xb_size, const YCHAR *name, char *buf, int bsize);
++int nval_list(const char *xb, int xb_size, char *buf, int bsize);
++int nval_hasvalues(const char *xb, int xb_size);
++#endif
+--- a/fs/yaffs2/yaffs_nand.c
++++ b/fs/yaffs2/yaffs_nand.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -11,124 +11,129 @@
+ * published by the Free Software Foundation.
+ */
+
+-const char *yaffs_nand_c_version =
+- "$Id: yaffs_nand.c,v 1.10 2009-03-06 17:20:54 wookey Exp $";
+-
+ #include "yaffs_nand.h"
+ #include "yaffs_tagscompat.h"
+ #include "yaffs_tagsvalidity.h"
+
+ #include "yaffs_getblockinfo.h"
+
+-int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++int yaffs_rd_chunk_tags_nand(yaffs_dev_t *dev, int nand_chunk,
+ __u8 *buffer,
+- yaffs_ExtendedTags *tags)
++ yaffs_ext_tags *tags)
+ {
+ int result;
+- yaffs_ExtendedTags localTags;
++ yaffs_ext_tags localTags;
++
++ int realignedChunkInNAND = nand_chunk - dev->chunk_offset;
+
+- int realignedChunkInNAND = chunkInNAND - dev->chunkOffset;
++ dev->n_page_reads++;
+
+ /* If there are no tags provided, use local tags to get prioritised gc working */
+ if (!tags)
+ tags = &localTags;
+
+- if (dev->readChunkWithTagsFromNAND)
+- result = dev->readChunkWithTagsFromNAND(dev, realignedChunkInNAND, buffer,
++ if (dev->param.read_chunk_tags_fn)
++ result = dev->param.read_chunk_tags_fn(dev, realignedChunkInNAND, buffer,
+ tags);
+ else
+- result = yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(dev,
++ result = yaffs_tags_compat_rd(dev,
+ realignedChunkInNAND,
+ buffer,
+ tags);
+ if (tags &&
+- tags->eccResult > YAFFS_ECC_RESULT_NO_ERROR) {
++ tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
+
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, chunkInNAND/dev->nChunksPerBlock);
+- yaffs_HandleChunkError(dev, bi);
++ yaffs_block_info_t *bi;
++ bi = yaffs_get_block_info(dev, nand_chunk/dev->param.chunks_per_block);
++ yaffs_handle_chunk_error(dev, bi);
+ }
+
+ return result;
+ }
+
+-int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND,
++int yaffs_wr_chunk_tags_nand(yaffs_dev_t *dev,
++ int nand_chunk,
+ const __u8 *buffer,
+- yaffs_ExtendedTags *tags)
++ yaffs_ext_tags *tags)
+ {
+- chunkInNAND -= dev->chunkOffset;
++
++ dev->n_page_writes++;
++
++ nand_chunk -= dev->chunk_offset;
+
+
+ if (tags) {
+- tags->sequenceNumber = dev->sequenceNumber;
+- tags->chunkUsed = 1;
+- if (!yaffs_ValidateTags(tags)) {
++ tags->seq_number = dev->seq_number;
++ tags->chunk_used = 1;
++ if (!yaffs_validate_tags(tags)) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("Writing uninitialised tags" TENDSTR)));
+ YBUG();
+ }
+ T(YAFFS_TRACE_WRITE,
+- (TSTR("Writing chunk %d tags %d %d" TENDSTR), chunkInNAND,
+- tags->objectId, tags->chunkId));
++ (TSTR("Writing chunk %d tags %d %d" TENDSTR), nand_chunk,
++ tags->obj_id, tags->chunk_id));
+ } else {
+ T(YAFFS_TRACE_ERROR, (TSTR("Writing with no tags" TENDSTR)));
+ YBUG();
+ }
+
+- if (dev->writeChunkWithTagsToNAND)
+- return dev->writeChunkWithTagsToNAND(dev, chunkInNAND, buffer,
++ if (dev->param.write_chunk_tags_fn)
++ return dev->param.write_chunk_tags_fn(dev, nand_chunk, buffer,
+ tags);
+ else
+- return yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(dev,
+- chunkInNAND,
++ return yaffs_tags_compat_wr(dev,
++ nand_chunk,
+ buffer,
+ tags);
+ }
+
+-int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo)
++int yaffs_mark_bad(yaffs_dev_t *dev, int block_no)
+ {
+- blockNo -= dev->blockOffset;
++ block_no -= dev->block_offset;
++
+
+-;
+- if (dev->markNANDBlockBad)
+- return dev->markNANDBlockBad(dev, blockNo);
++ if (dev->param.bad_block_fn)
++ return dev->param.bad_block_fn(dev, block_no);
+ else
+- return yaffs_TagsCompatabilityMarkNANDBlockBad(dev, blockNo);
++ return yaffs_tags_compat_mark_bad(dev, block_no);
+ }
+
+-int yaffs_QueryInitialBlockState(yaffs_Device *dev,
+- int blockNo,
+- yaffs_BlockState *state,
+- __u32 *sequenceNumber)
++int yaffs_query_init_block_state(yaffs_dev_t *dev,
++ int block_no,
++ yaffs_block_state_t *state,
++ __u32 *seq_number)
+ {
+- blockNo -= dev->blockOffset;
++ block_no -= dev->block_offset;
+
+- if (dev->queryNANDBlock)
+- return dev->queryNANDBlock(dev, blockNo, state, sequenceNumber);
++ if (dev->param.query_block_fn)
++ return dev->param.query_block_fn(dev, block_no, state, seq_number);
+ else
+- return yaffs_TagsCompatabilityQueryNANDBlock(dev, blockNo,
++ return yaffs_tags_compat_query_block(dev, block_no,
+ state,
+- sequenceNumber);
++ seq_number);
+ }
+
+
+-int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+- int blockInNAND)
++int yaffs_erase_block(struct yaffs_dev_s *dev,
++ int flash_block)
+ {
+ int result;
+
+- blockInNAND -= dev->blockOffset;
++ flash_block -= dev->block_offset;
+
++ dev->n_erasures++;
+
+- dev->nBlockErasures++;
+- result = dev->eraseBlockInNAND(dev, blockInNAND);
++ result = dev->param.erase_fn(dev, flash_block);
+
+ return result;
+ }
+
+-int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev)
++int yaffs_init_nand(struct yaffs_dev_s *dev)
+ {
+- return dev->initialiseNAND(dev);
++ if(dev->param.initialise_flash_fn)
++ return dev->param.initialise_flash_fn(dev);
++ return YAFFS_OK;
+ }
+
+
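The yaffs_nand.c wrappers now dispatch through per-device callbacks held in dev->param (read_chunk_tags_fn, write_chunk_tags_fn, bad_block_fn, query_block_fn, erase_fn, initialise_flash_fn) and fall back to the yaffs_tags_compat_* routines when a callback is NULL. A stripped-down, standalone illustration of that dispatch-with-fallback shape, using invented demo types rather than the yaffs structures:

#include <stdio.h>

struct demo_dev;
typedef int (*bad_block_fn_t)(struct demo_dev *dev, int block_no);

struct demo_param {                     /* stands in for part of yaffs_param */
        bad_block_fn_t bad_block_fn;
};

struct demo_dev {                       /* stands in for yaffs_dev_t */
        struct demo_param param;
};

static int driver_mark_bad(struct demo_dev *dev, int block_no)
{
        (void)dev;
        printf("driver callback marks block %d bad\n", block_no);
        return 1;                       /* YAFFS_OK-style result */
}

static int compat_mark_bad(struct demo_dev *dev, int block_no)
{
        (void)dev;
        printf("tags-compat fallback marks block %d bad\n", block_no);
        return 1;
}

/* Same shape as yaffs_mark_bad(): use the callback if set, else fall back. */
static int demo_mark_bad(struct demo_dev *dev, int block_no)
{
        if (dev->param.bad_block_fn)
                return dev->param.bad_block_fn(dev, block_no);
        return compat_mark_bad(dev, block_no);
}

int main(void)
{
        struct demo_dev with_driver = { { driver_mark_bad } };
        struct demo_dev no_driver = { { 0 } };

        demo_mark_bad(&with_driver, 7);
        demo_mark_bad(&no_driver, 7);
        return 0;
}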
+--- a/fs/yaffs2/yaffs_nandemul2k.h
++++ b/fs/yaffs2/yaffs_nandemul2k.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -20,18 +20,18 @@
+
+ #include "yaffs_guts.h"
+
+-int nandemul2k_WriteChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 *data,
+- const yaffs_ExtendedTags *tags);
+-int nandemul2k_ReadChunkWithTagsFromNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, __u8 *data,
+- yaffs_ExtendedTags *tags);
+-int nandemul2k_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+-int nandemul2k_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *state, __u32 *sequenceNumber);
+-int nandemul2k_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+- int blockInNAND);
+-int nandemul2k_InitialiseNAND(struct yaffs_DeviceStruct *dev);
++int nandemul2k_WriteChunkWithTagsToNAND(struct yaffs_dev_s *dev,
++ int nand_chunk, const __u8 *data,
++ const yaffs_ext_tags *tags);
++int nandemul2k_ReadChunkWithTagsFromNAND(struct yaffs_dev_s *dev,
++ int nand_chunk, __u8 *data,
++ yaffs_ext_tags *tags);
++int nandemul2k_MarkNANDBlockBad(struct yaffs_dev_s *dev, int block_no);
++int nandemul2k_QueryNANDBlock(struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *state, __u32 *seq_number);
++int nandemul2k_EraseBlockInNAND(struct yaffs_dev_s *dev,
++ int flash_block);
++int nandemul2k_InitialiseNAND(struct yaffs_dev_s *dev);
+ int nandemul2k_GetBytesPerChunk(void);
+ int nandemul2k_GetChunksPerBlock(void);
+ int nandemul2k_GetNumberOfBlocks(void);
+--- a/fs/yaffs2/yaffs_nand.h
++++ b/fs/yaffs2/yaffs_nand.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -19,26 +19,26 @@
+
+
+
+-int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++int yaffs_rd_chunk_tags_nand(yaffs_dev_t *dev, int nand_chunk,
+ __u8 *buffer,
+- yaffs_ExtendedTags *tags);
++ yaffs_ext_tags *tags);
+
+-int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND,
++int yaffs_wr_chunk_tags_nand(yaffs_dev_t *dev,
++ int nand_chunk,
+ const __u8 *buffer,
+- yaffs_ExtendedTags *tags);
++ yaffs_ext_tags *tags);
+
+-int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo);
++int yaffs_mark_bad(yaffs_dev_t *dev, int block_no);
+
+-int yaffs_QueryInitialBlockState(yaffs_Device *dev,
+- int blockNo,
+- yaffs_BlockState *state,
+- unsigned *sequenceNumber);
++int yaffs_query_init_block_state(yaffs_dev_t *dev,
++ int block_no,
++ yaffs_block_state_t *state,
++ unsigned *seq_number);
+
+-int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+- int blockInNAND);
++int yaffs_erase_block(struct yaffs_dev_s *dev,
++ int flash_block);
+
+-int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev);
++int yaffs_init_nand(struct yaffs_dev_s *dev);
+
+ #endif
+
+--- a/fs/yaffs2/yaffs_packedtags1.c
++++ b/fs/yaffs2/yaffs_packedtags1.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -14,37 +14,37 @@
+ #include "yaffs_packedtags1.h"
+ #include "yportenv.h"
+
+-void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t)
++void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ext_tags *t)
+ {
+- pt->chunkId = t->chunkId;
+- pt->serialNumber = t->serialNumber;
+- pt->byteCount = t->byteCount;
+- pt->objectId = t->objectId;
++ pt->chunk_id = t->chunk_id;
++ pt->serial_number = t->serial_number;
++ pt->n_bytes = t->n_bytes;
++ pt->obj_id = t->obj_id;
+ pt->ecc = 0;
+- pt->deleted = (t->chunkDeleted) ? 0 : 1;
++ pt->deleted = (t->is_deleted) ? 0 : 1;
+ pt->unusedStuff = 0;
+ pt->shouldBeFF = 0xFFFFFFFF;
+
+ }
+
+-void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt)
++void yaffs_unpack_tags1(yaffs_ext_tags *t, const yaffs_PackedTags1 *pt)
+ {
+ static const __u8 allFF[] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff };
+
+ if (memcmp(allFF, pt, sizeof(yaffs_PackedTags1))) {
+- t->blockBad = 0;
++ t->block_bad = 0;
+ if (pt->shouldBeFF != 0xFFFFFFFF)
+- t->blockBad = 1;
+- t->chunkUsed = 1;
+- t->objectId = pt->objectId;
+- t->chunkId = pt->chunkId;
+- t->byteCount = pt->byteCount;
+- t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
+- t->chunkDeleted = (pt->deleted) ? 0 : 1;
+- t->serialNumber = pt->serialNumber;
++ t->block_bad = 1;
++ t->chunk_used = 1;
++ t->obj_id = pt->obj_id;
++ t->chunk_id = pt->chunk_id;
++ t->n_bytes = pt->n_bytes;
++ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++ t->is_deleted = (pt->deleted) ? 0 : 1;
++ t->serial_number = pt->serial_number;
+ } else {
+- memset(t, 0, sizeof(yaffs_ExtendedTags));
++ memset(t, 0, sizeof(yaffs_ext_tags));
+ }
+ }
+--- a/fs/yaffs2/yaffs_packedtags1.h
++++ b/fs/yaffs2/yaffs_packedtags1.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -21,10 +21,10 @@
+ #include "yaffs_guts.h"
+
+ typedef struct {
+- unsigned chunkId:20;
+- unsigned serialNumber:2;
+- unsigned byteCount:10;
+- unsigned objectId:18;
++ unsigned chunk_id:20;
++ unsigned serial_number:2;
++ unsigned n_bytes:10;
++ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned deleted:1;
+ unsigned unusedStuff:1;
+@@ -32,6 +32,6 @@ typedef struct {
+
+ } yaffs_PackedTags1;
+
+-void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t);
+-void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt);
++void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ext_tags *t);
++void yaffs_unpack_tags1(yaffs_ext_tags *t, const yaffs_PackedTags1 *pt);
+ #endif
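yaffs_PackedTags1 depends on the compiler packing these bit-fields into 12 bytes (the old mtdif1 code compile-time asserted exactly that) and on the inverted deleted flag: 1 means the chunk is live, 0 means deleted. A standalone check using the field widths visible above plus a plain unsigned for the shouldBeFF member referenced by the mtdif code; bit-field layout is ABI dependent, so this is a sanity check, not a guarantee:

#include <stdio.h>

/* Field widths copied from the header above; shouldBeFF is declared as a
 * plain unsigned here because its exact declaration is outside this hunk. */
typedef struct {
        unsigned chunk_id:20;
        unsigned serial_number:2;
        unsigned n_bytes:10;
        unsigned obj_id:18;
        unsigned ecc:12;
        unsigned deleted:1;
        unsigned unusedStuff:1;
        unsigned shouldBeFF;
} packed_tags1_demo;

int main(void)
{
        packed_tags1_demo pt = { 0 };
        int is_deleted = 1;

        /* yaffs stores the flag inverted: 1 = chunk live, 0 = chunk deleted. */
        pt.deleted = is_deleted ? 0 : 1;
        pt.shouldBeFF = 0xFFFFFFFF;

        printf("sizeof(packed_tags1_demo) = %zu (yaffs expects 12)\n",
               sizeof(packed_tags1_demo));
        printf("deleted chunk stored with deleted bit = %u\n",
               (unsigned)pt.deleted);
        return 0;
}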
+--- a/fs/yaffs2/yaffs_packedtags2.c
++++ b/fs/yaffs2/yaffs_packedtags2.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -13,6 +13,7 @@
+
+ #include "yaffs_packedtags2.h"
+ #include "yportenv.h"
++#include "yaffs_trace.h"
+ #include "yaffs_tagsvalidity.h"
+
+ /* This code packs a set of extended tags into a binary structure for
+@@ -24,7 +25,7 @@
+ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
+ */
+
+-/* Extra flags applied to chunkId */
++/* Extra flags applied to chunk_id */
+
+ #define EXTRA_HEADER_INFO_FLAG 0x80000000
+ #define EXTRA_SHRINK_FLAG 0x40000000
+@@ -42,53 +43,53 @@ static void yaffs_DumpPackedTags2TagsPar
+ {
+ T(YAFFS_TRACE_MTD,
+ (TSTR("packed tags obj %d chunk %d byte %d seq %d" TENDSTR),
+- ptt->objectId, ptt->chunkId, ptt->byteCount,
+- ptt->sequenceNumber));
++ ptt->obj_id, ptt->chunk_id, ptt->n_bytes,
++ ptt->seq_number));
+ }
+ static void yaffs_DumpPackedTags2(const yaffs_PackedTags2 *pt)
+ {
+ yaffs_DumpPackedTags2TagsPart(&pt->t);
+ }
+
+-static void yaffs_DumpTags2(const yaffs_ExtendedTags *t)
++static void yaffs_DumpTags2(const yaffs_ext_tags *t)
+ {
+ T(YAFFS_TRACE_MTD,
+ (TSTR
+ ("ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d"
+- TENDSTR), t->eccResult, t->blockBad, t->chunkUsed, t->objectId,
+- t->chunkId, t->byteCount, t->chunkDeleted, t->serialNumber,
+- t->sequenceNumber));
++ TENDSTR), t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
++ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
++ t->seq_number));
+
+ }
+
+ void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *ptt,
+- const yaffs_ExtendedTags *t)
++ const yaffs_ext_tags *t)
+ {
+- ptt->chunkId = t->chunkId;
+- ptt->sequenceNumber = t->sequenceNumber;
+- ptt->byteCount = t->byteCount;
+- ptt->objectId = t->objectId;
++ ptt->chunk_id = t->chunk_id;
++ ptt->seq_number = t->seq_number;
++ ptt->n_bytes = t->n_bytes;
++ ptt->obj_id = t->obj_id;
+
+- if (t->chunkId == 0 && t->extraHeaderInfoAvailable) {
++ if (t->chunk_id == 0 && t->extra_available) {
+ /* Store the extra header info instead */
+- /* We save the parent object in the chunkId */
+- ptt->chunkId = EXTRA_HEADER_INFO_FLAG
+- | t->extraParentObjectId;
+- if (t->extraIsShrinkHeader)
+- ptt->chunkId |= EXTRA_SHRINK_FLAG;
+- if (t->extraShadows)
+- ptt->chunkId |= EXTRA_SHADOWS_FLAG;
+-
+- ptt->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
+- ptt->objectId |=
+- (t->extraObjectType << EXTRA_OBJECT_TYPE_SHIFT);
+-
+- if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK)
+- ptt->byteCount = t->extraEquivalentObjectId;
+- else if (t->extraObjectType == YAFFS_OBJECT_TYPE_FILE)
+- ptt->byteCount = t->extraFileLength;
++ /* We save the parent object in the chunk_id */
++ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG
++ | t->extra_parent_id;
++ if (t->extra_is_shrink)
++ ptt->chunk_id |= EXTRA_SHRINK_FLAG;
++ if (t->extra_shadows)
++ ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
++
++ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
++ ptt->obj_id |=
++ (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
++
++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ ptt->n_bytes = t->extra_equiv_id;
++ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
++ ptt->n_bytes = t->extra_length;
+ else
+- ptt->byteCount = 0;
++ ptt->n_bytes = 0;
+ }
+
+ yaffs_DumpPackedTags2TagsPart(ptt);
+@@ -96,59 +97,56 @@ void yaffs_PackTags2TagsPart(yaffs_Packe
+ }
+
+
+-void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t)
++void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ext_tags *t, int tagsECC)
+ {
+ yaffs_PackTags2TagsPart(&pt->t, t);
+
+-#ifndef YAFFS_IGNORE_TAGS_ECC
+- {
+- yaffs_ECCCalculateOther((unsigned char *)&pt->t,
++ if(tagsECC)
++ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(yaffs_PackedTags2TagsPart),
+ &pt->ecc);
+- }
+-#endif
+ }
+
+
+-void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t,
++void yaffs_unpack_tags2tags_part(yaffs_ext_tags *t,
+ yaffs_PackedTags2TagsPart *ptt)
+ {
+
+- memset(t, 0, sizeof(yaffs_ExtendedTags));
++ memset(t, 0, sizeof(yaffs_ext_tags));
+
+- yaffs_InitialiseTags(t);
++ yaffs_init_tags(t);
+
+- if (ptt->sequenceNumber != 0xFFFFFFFF) {
+- t->blockBad = 0;
+- t->chunkUsed = 1;
+- t->objectId = ptt->objectId;
+- t->chunkId = ptt->chunkId;
+- t->byteCount = ptt->byteCount;
+- t->chunkDeleted = 0;
+- t->serialNumber = 0;
+- t->sequenceNumber = ptt->sequenceNumber;
++ if (ptt->seq_number != 0xFFFFFFFF) {
++ t->block_bad = 0;
++ t->chunk_used = 1;
++ t->obj_id = ptt->obj_id;
++ t->chunk_id = ptt->chunk_id;
++ t->n_bytes = ptt->n_bytes;
++ t->is_deleted = 0;
++ t->serial_number = 0;
++ t->seq_number = ptt->seq_number;
+
+ /* Do extra header info stuff */
+
+- if (ptt->chunkId & EXTRA_HEADER_INFO_FLAG) {
+- t->chunkId = 0;
+- t->byteCount = 0;
+-
+- t->extraHeaderInfoAvailable = 1;
+- t->extraParentObjectId =
+- ptt->chunkId & (~(ALL_EXTRA_FLAGS));
+- t->extraIsShrinkHeader =
+- (ptt->chunkId & EXTRA_SHRINK_FLAG) ? 1 : 0;
+- t->extraShadows =
+- (ptt->chunkId & EXTRA_SHADOWS_FLAG) ? 1 : 0;
+- t->extraObjectType =
+- ptt->objectId >> EXTRA_OBJECT_TYPE_SHIFT;
+- t->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
++ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
++ t->chunk_id = 0;
++ t->n_bytes = 0;
++
++ t->extra_available = 1;
++ t->extra_parent_id =
++ ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
++ t->extra_is_shrink =
++ (ptt->chunk_id & EXTRA_SHRINK_FLAG) ? 1 : 0;
++ t->extra_shadows =
++ (ptt->chunk_id & EXTRA_SHADOWS_FLAG) ? 1 : 0;
++ t->extra_obj_type =
++ ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
++ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+
+- if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK)
+- t->extraEquivalentObjectId = ptt->byteCount;
++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ t->extra_equiv_id = ptt->n_bytes;
+ else
+- t->extraFileLength = ptt->byteCount;
++ t->extra_length = ptt->n_bytes;
+ }
+ }
+
+@@ -158,49 +156,43 @@ void yaffs_UnpackTags2TagsPart(yaffs_Ext
+ }
+
+
+-void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt)
++void yaffs_unpack_tags2(yaffs_ext_tags *t, yaffs_PackedTags2 *pt, int tagsECC)
+ {
+
+- yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++ yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+- if (pt->t.sequenceNumber != 0xFFFFFFFF) {
+- /* Page is in use */
+-#ifndef YAFFS_IGNORE_TAGS_ECC
+- {
+- yaffs_ECCOther ecc;
+- int result;
+- yaffs_ECCCalculateOther((unsigned char *)&pt->t,
+- sizeof
+- (yaffs_PackedTags2TagsPart),
+- &ecc);
+- result =
+- yaffs_ECCCorrectOther((unsigned char *)&pt->t,
+- sizeof
+- (yaffs_PackedTags2TagsPart),
+- &pt->ecc, &ecc);
+- switch (result) {
++ if (pt->t.seq_number != 0xFFFFFFFF &&
++ tagsECC){
++ /* Chunk is in use and we need to do ECC */
++
++ yaffs_ECCOther ecc;
++ int result;
++ yaffs_ecc_calc_other((unsigned char *)&pt->t,
++ sizeof(yaffs_PackedTags2TagsPart),
++ &ecc);
++ result = yaffs_ecc_correct_other((unsigned char *)&pt->t,
++ sizeof(yaffs_PackedTags2TagsPart),
++ &pt->ecc, &ecc);
++ switch (result) {
+ case 0:
+- eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++ ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+ case 1:
+- eccResult = YAFFS_ECC_RESULT_FIXED;
++ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ case -1:
+- eccResult = YAFFS_ECC_RESULT_UNFIXED;
++ ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ break;
+ default:
+- eccResult = YAFFS_ECC_RESULT_UNKNOWN;
+- }
++ ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+-#endif
+ }
+
+- yaffs_UnpackTags2TagsPart(t, &pt->t);
++ yaffs_unpack_tags2tags_part(t, &pt->t);
+
+- t->eccResult = eccResult;
++ t->ecc_result = ecc_result;
+
+ yaffs_DumpPackedTags2(pt);
+ yaffs_DumpTags2(t);
+-
+ }
+
+--- a/fs/yaffs2/yaffs_packedtags2.h
++++ b/fs/yaffs2/yaffs_packedtags2.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -22,10 +22,10 @@
+ #include "yaffs_ecc.h"
+
+ typedef struct {
+- unsigned sequenceNumber;
+- unsigned objectId;
+- unsigned chunkId;
+- unsigned byteCount;
++ unsigned seq_number;
++ unsigned obj_id;
++ unsigned chunk_id;
++ unsigned n_bytes;
+ } yaffs_PackedTags2TagsPart;
+
+ typedef struct {
+@@ -34,10 +34,10 @@ typedef struct {
+ } yaffs_PackedTags2;
+
+ /* Full packed tags with ECC, used for oob tags */
+-void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t);
+-void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt);
++void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ext_tags *t, int tagsECC);
++void yaffs_unpack_tags2(yaffs_ext_tags *t, yaffs_PackedTags2 *pt, int tagsECC);
+
+ /* Only the tags part (no ECC for use with inband tags */
+-void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *pt, const yaffs_ExtendedTags *t);
+-void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t, yaffs_PackedTags2TagsPart *pt);
++void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *pt, const yaffs_ext_tags *t);
++void yaffs_unpack_tags2tags_part(yaffs_ext_tags *t, yaffs_PackedTags2TagsPart *pt);
+ #endif
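The EXTRA_* flag and mask constants used in the unpack hunk are defined in yaffs_packedtags2.c and do not appear in this diff, so the values below are purely illustrative. The sketch only shows the general trick: when the OOB area cannot hold a full object header, the parent id, object type and shrink flag are folded into the high bits of chunk_id and obj_id on pack and peeled back out on unpack, the same shape as the code above.

#include <stdio.h>

/* Illustrative bit assignments, NOT the values from yaffs_packedtags2.c. */
#define X_HEADER_INFO_FLAG  0x80000000u
#define X_SHRINK_FLAG       0x40000000u
#define X_SHADOWS_FLAG      0x20000000u
#define X_ALL_FLAGS         0xE0000000u
#define X_OBJ_TYPE_SHIFT    28
#define X_OBJ_TYPE_MASK     (0xFu << X_OBJ_TYPE_SHIFT)

struct packed { unsigned obj_id, chunk_id; };

/* Fold the extra header information into the two id words. */
static void pack_extra(struct packed *p, unsigned obj_id, unsigned parent_id,
                       unsigned obj_type, int is_shrink)
{
    p->chunk_id = parent_id | X_HEADER_INFO_FLAG |
                  (is_shrink ? X_SHRINK_FLAG : 0);
    p->obj_id = obj_id | (obj_type << X_OBJ_TYPE_SHIFT);
}

/* Mirror of the unpack side: strip the flags back off. */
static void unpack_extra(const struct packed *p)
{
    if (p->chunk_id & X_HEADER_INFO_FLAG) {
        unsigned parent = p->chunk_id & ~X_ALL_FLAGS;
        unsigned type = p->obj_id >> X_OBJ_TYPE_SHIFT;
        unsigned id = p->obj_id & ~X_OBJ_TYPE_MASK;

        printf("obj %u type %u parent %u shrink %d\n", id, type, parent,
               (p->chunk_id & X_SHRINK_FLAG) ? 1 : 0);
    }
}

int main(void)
{
    struct packed p;

    pack_extra(&p, 17, 1, 3, 1);
    unpack_extra(&p);   /* obj 17 type 3 parent 1 shrink 1 */
    return 0;
}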
+--- a/fs/yaffs2/yaffs_qsort.h
++++ b/fs/yaffs2/yaffs_qsort.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -17,7 +17,18 @@
+ #ifndef __YAFFS_QSORT_H__
+ #define __YAFFS_QSORT_H__
+
++#ifdef __KERNEL__
++#include <linux/sort.h>
++
++extern void yaffs_qsort(void *const base, size_t total_elems, size_t size,
++ int (*cmp)(const void *, const void *)){
++ sort(base, total_elems, size, cmp, NULL);
++}
++
++#else
++
+ extern void yaffs_qsort(void *const base, size_t total_elems, size_t size,
+ int (*cmp)(const void *, const void *));
+
+ #endif
++#endif
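The kernel branch above gives yaffs_qsort() a body right in the header by wrapping sort() from <linux/sort.h>, but a non-static, non-inline definition in a header only links cleanly while exactly one translation unit includes it. A static inline wrapper expresses the same mapping without that restriction; this is a sketch of the alternative, not a claim about what yaffs itself later shipped:

#ifdef __KERNEL__
#include <linux/sort.h>

/* Every includer gets its own inline copy, so there is no duplicate
 * symbol at link time; the kernel heapsort does the real work. */
static inline void yaffs_qsort(void *const base, size_t total_elems,
                               size_t size,
                               int (*cmp)(const void *, const void *))
{
    sort(base, total_elems, size, cmp, NULL);
}

#else
#include <stdlib.h>

/* Outside the kernel, plain libc qsort() is enough. */
static inline void yaffs_qsort(void *const base, size_t total_elems,
                               size_t size,
                               int (*cmp)(const void *, const void *))
{
    qsort(base, total_elems, size, cmp);
}
#endif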
+--- a/fs/yaffs2/yaffs_tagscompat.c
++++ b/fs/yaffs2/yaffs_tagscompat.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -15,19 +15,20 @@
+ #include "yaffs_tagscompat.h"
+ #include "yaffs_ecc.h"
+ #include "yaffs_getblockinfo.h"
++#include "yaffs_trace.h"
+
+-static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND);
++static void yaffs_handle_rd_data_error(yaffs_dev_t *dev, int nand_chunk);
+ #ifdef NOTYET
+-static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND);
+-static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++static void yaffs_check_written_block(yaffs_dev_t *dev, int nand_chunk);
++static void yaffs_handle_chunk_wr_ok(yaffs_dev_t *dev, int nand_chunk,
+ const __u8 *data,
+- const yaffs_Spare *spare);
+-static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
+- const yaffs_Spare *spare);
+-static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND);
++ const yaffs_spare *spare);
++static void yaffs_handle_chunk_update(yaffs_dev_t *dev, int nand_chunk,
++ const yaffs_spare *spare);
++static void yaffs_handle_chunk_wr_error(yaffs_dev_t *dev, int nand_chunk);
+ #endif
+
+-static const char yaffs_countBitsTable[256] = {
++static const char yaffs_count_bits_table[256] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+@@ -46,26 +47,26 @@ static const char yaffs_countBitsTable[2
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
+ };
+
+-int yaffs_CountBits(__u8 x)
++int yaffs_count_bits(__u8 x)
+ {
+ int retVal;
+- retVal = yaffs_countBitsTable[x];
++ retVal = yaffs_count_bits_table[x];
+ return retVal;
+ }
+
+ /********** Tags ECC calculations *********/
+
+-void yaffs_CalcECC(const __u8 *data, yaffs_Spare *spare)
++void yaffs_calc_ecc(const __u8 *data, yaffs_spare *spare)
+ {
+- yaffs_ECCCalculate(data, spare->ecc1);
+- yaffs_ECCCalculate(&data[256], spare->ecc2);
++ yaffs_ecc_cacl(data, spare->ecc1);
++ yaffs_ecc_cacl(&data[256], spare->ecc2);
+ }
+
+-void yaffs_CalcTagsECC(yaffs_Tags *tags)
++void yaffs_calc_tags_ecc(yaffs_tags_t *tags)
+ {
+ /* Calculate an ecc */
+
+- unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes;
++ unsigned char *b = ((yaffs_tags_union_t *) tags)->as_bytes;
+ unsigned i, j;
+ unsigned ecc = 0;
+ unsigned bit = 0;
+@@ -84,24 +85,24 @@ void yaffs_CalcTagsECC(yaffs_Tags *tags)
+
+ }
+
+-int yaffs_CheckECCOnTags(yaffs_Tags *tags)
++int yaffs_check_tags_ecc(yaffs_tags_t *tags)
+ {
+ unsigned ecc = tags->ecc;
+
+- yaffs_CalcTagsECC(tags);
++ yaffs_calc_tags_ecc(tags);
+
+ ecc ^= tags->ecc;
+
+ if (ecc && ecc <= 64) {
+ /* TODO: Handle the failure better. Retire? */
+- unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes;
++ unsigned char *b = ((yaffs_tags_union_t *) tags)->as_bytes;
+
+ ecc--;
+
+ b[ecc / 8] ^= (1 << (ecc & 7));
+
+ /* Now recvalc the ecc */
+- yaffs_CalcTagsECC(tags);
++ yaffs_calc_tags_ecc(tags);
+
+ return 1; /* recovered error */
+ } else if (ecc) {
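The single-bit recovery in yaffs_check_tags_ecc() above works because the tags ECC is, in effect, a positional parity: the 1-based positions of every set bit are XORed together, so one flipped bit changes the stored-versus-recomputed value by exactly its own position, and b[ecc / 8] ^= 1 << (ecc & 7) flips it back. A standalone re-derivation of that idea over a plain 8-byte buffer, not the yaffs tags union itself:

#include <stdio.h>
#include <string.h>

/* XOR together the 1-based bit positions of every set bit (64 positions
 * for an 8-byte buffer), the same style of code protecting the packed
 * tags above. */
static unsigned pos_parity(const unsigned char *b, unsigned nbytes)
{
    unsigned ecc = 0, bit = 0, i, j;

    for (i = 0; i < nbytes; i++)
        for (j = 1; j & 0xff; j <<= 1) {
            bit++;
            if (b[i] & j)
                ecc ^= bit;
        }
    return ecc;
}

int main(void)
{
    unsigned char buf[8] = "tagdata";
    unsigned stored = pos_parity(buf, 8);
    unsigned delta;

    buf[3] ^= 0x10;                 /* flip one bit "in flash" */

    delta = stored ^ pos_parity(buf, 8);
    if (delta && delta <= 64) {     /* single-bit error: repair it */
        delta--;
        buf[delta / 8] ^= 1 << (delta & 7);
    }
    printf("recovered: %s\n", memcmp(buf, "tagdata", 8) ? "no" : "yes");
    return 0;
}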
+@@ -115,76 +116,73 @@ int yaffs_CheckECCOnTags(yaffs_Tags *tag
+
+ /********** Tags **********/
+
+-static void yaffs_LoadTagsIntoSpare(yaffs_Spare *sparePtr,
+- yaffs_Tags *tagsPtr)
++static void yaffs_load_tags_to_spare(yaffs_spare *sparePtr,
++ yaffs_tags_t *tagsPtr)
+ {
+- yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
++ yaffs_tags_union_t *tu = (yaffs_tags_union_t *) tagsPtr;
+
+- yaffs_CalcTagsECC(tagsPtr);
++ yaffs_calc_tags_ecc(tagsPtr);
+
+- sparePtr->tagByte0 = tu->asBytes[0];
+- sparePtr->tagByte1 = tu->asBytes[1];
+- sparePtr->tagByte2 = tu->asBytes[2];
+- sparePtr->tagByte3 = tu->asBytes[3];
+- sparePtr->tagByte4 = tu->asBytes[4];
+- sparePtr->tagByte5 = tu->asBytes[5];
+- sparePtr->tagByte6 = tu->asBytes[6];
+- sparePtr->tagByte7 = tu->asBytes[7];
++ sparePtr->tb0 = tu->as_bytes[0];
++ sparePtr->tb1 = tu->as_bytes[1];
++ sparePtr->tb2 = tu->as_bytes[2];
++ sparePtr->tb3 = tu->as_bytes[3];
++ sparePtr->tb4 = tu->as_bytes[4];
++ sparePtr->tb5 = tu->as_bytes[5];
++ sparePtr->tb6 = tu->as_bytes[6];
++ sparePtr->tb7 = tu->as_bytes[7];
+ }
+
+-static void yaffs_GetTagsFromSpare(yaffs_Device *dev, yaffs_Spare *sparePtr,
+- yaffs_Tags *tagsPtr)
++static void yaffs_get_tags_from_spare(yaffs_dev_t *dev, yaffs_spare *sparePtr,
++ yaffs_tags_t *tagsPtr)
+ {
+- yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
++ yaffs_tags_union_t *tu = (yaffs_tags_union_t *) tagsPtr;
+ int result;
+
+- tu->asBytes[0] = sparePtr->tagByte0;
+- tu->asBytes[1] = sparePtr->tagByte1;
+- tu->asBytes[2] = sparePtr->tagByte2;
+- tu->asBytes[3] = sparePtr->tagByte3;
+- tu->asBytes[4] = sparePtr->tagByte4;
+- tu->asBytes[5] = sparePtr->tagByte5;
+- tu->asBytes[6] = sparePtr->tagByte6;
+- tu->asBytes[7] = sparePtr->tagByte7;
++ tu->as_bytes[0] = sparePtr->tb0;
++ tu->as_bytes[1] = sparePtr->tb1;
++ tu->as_bytes[2] = sparePtr->tb2;
++ tu->as_bytes[3] = sparePtr->tb3;
++ tu->as_bytes[4] = sparePtr->tb4;
++ tu->as_bytes[5] = sparePtr->tb5;
++ tu->as_bytes[6] = sparePtr->tb6;
++ tu->as_bytes[7] = sparePtr->tb7;
+
+- result = yaffs_CheckECCOnTags(tagsPtr);
++ result = yaffs_check_tags_ecc(tagsPtr);
+ if (result > 0)
+- dev->tagsEccFixed++;
++ dev->n_tags_ecc_fixed++;
+ else if (result < 0)
+- dev->tagsEccUnfixed++;
++ dev->n_tags_ecc_unfixed++;
+ }
+
+-static void yaffs_SpareInitialise(yaffs_Spare *spare)
++static void yaffs_spare_init(yaffs_spare *spare)
+ {
+- memset(spare, 0xFF, sizeof(yaffs_Spare));
++ memset(spare, 0xFF, sizeof(yaffs_spare));
+ }
+
+-static int yaffs_WriteChunkToNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 *data,
+- yaffs_Spare *spare)
++static int yaffs_wr_nand(struct yaffs_dev_s *dev,
++ int nand_chunk, const __u8 *data,
++ yaffs_spare *spare)
+ {
+- if (chunkInNAND < dev->startBlock * dev->nChunksPerBlock) {
++ if (nand_chunk < dev->param.start_block * dev->param.chunks_per_block) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("**>> yaffs chunk %d is not valid" TENDSTR),
+- chunkInNAND));
++ nand_chunk));
+ return YAFFS_FAIL;
+ }
+
+- dev->nPageWrites++;
+- return dev->writeChunkToNAND(dev, chunkInNAND, data, spare);
++ return dev->param.write_chunk_fn(dev, nand_chunk, data, spare);
+ }
+
+-static int yaffs_ReadChunkFromNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND,
++static int yaffs_rd_chunk_nand(struct yaffs_dev_s *dev,
++ int nand_chunk,
+ __u8 *data,
+- yaffs_Spare *spare,
+- yaffs_ECCResult *eccResult,
++ yaffs_spare *spare,
++ yaffs_ecc_result *ecc_result,
+ int doErrorCorrection)
+ {
+ int retVal;
+- yaffs_Spare localSpare;
+-
+- dev->nPageReads++;
++ yaffs_spare localSpare;
+
+ if (!spare && data) {
+ /* If we don't have a real spare, then we use a local one. */
+@@ -192,107 +190,107 @@ static int yaffs_ReadChunkFromNAND(struc
+ spare = &localSpare;
+ }
+
+- if (!dev->useNANDECC) {
+- retVal = dev->readChunkFromNAND(dev, chunkInNAND, data, spare);
++ if (!dev->param.use_nand_ecc) {
++ retVal = dev->param.read_chunk_fn(dev, nand_chunk, data, spare);
+ if (data && doErrorCorrection) {
+ /* Do ECC correction */
+ /* Todo handle any errors */
+- int eccResult1, eccResult2;
++ int ecc_result1, ecc_result2;
+ __u8 calcEcc[3];
+
+- yaffs_ECCCalculate(data, calcEcc);
+- eccResult1 =
+- yaffs_ECCCorrect(data, spare->ecc1, calcEcc);
+- yaffs_ECCCalculate(&data[256], calcEcc);
+- eccResult2 =
+- yaffs_ECCCorrect(&data[256], spare->ecc2, calcEcc);
++ yaffs_ecc_cacl(data, calcEcc);
++ ecc_result1 =
++ yaffs_ecc_correct(data, spare->ecc1, calcEcc);
++ yaffs_ecc_cacl(&data[256], calcEcc);
++ ecc_result2 =
++ yaffs_ecc_correct(&data[256], spare->ecc2, calcEcc);
+
+- if (eccResult1 > 0) {
++ if (ecc_result1 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error fix performed on chunk %d:0"
+- TENDSTR), chunkInNAND));
+- dev->eccFixed++;
+- } else if (eccResult1 < 0) {
++ TENDSTR), nand_chunk));
++ dev->n_ecc_fixed++;
++ } else if (ecc_result1 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error unfixed on chunk %d:0"
+- TENDSTR), chunkInNAND));
+- dev->eccUnfixed++;
++ TENDSTR), nand_chunk));
++ dev->n_ecc_unfixed++;
+ }
+
+- if (eccResult2 > 0) {
++ if (ecc_result2 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error fix performed on chunk %d:1"
+- TENDSTR), chunkInNAND));
+- dev->eccFixed++;
+- } else if (eccResult2 < 0) {
++ TENDSTR), nand_chunk));
++ dev->n_ecc_fixed++;
++ } else if (ecc_result2 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error unfixed on chunk %d:1"
+- TENDSTR), chunkInNAND));
+- dev->eccUnfixed++;
++ TENDSTR), nand_chunk));
++ dev->n_ecc_unfixed++;
+ }
+
+- if (eccResult1 || eccResult2) {
++ if (ecc_result1 || ecc_result2) {
+ /* We had a data problem on this page */
+- yaffs_HandleReadDataError(dev, chunkInNAND);
++ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+- if (eccResult1 < 0 || eccResult2 < 0)
+- *eccResult = YAFFS_ECC_RESULT_UNFIXED;
+- else if (eccResult1 > 0 || eccResult2 > 0)
+- *eccResult = YAFFS_ECC_RESULT_FIXED;
++ if (ecc_result1 < 0 || ecc_result2 < 0)
++ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ else if (ecc_result1 > 0 || ecc_result2 > 0)
++ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+- *eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ }
+ } else {
+ /* Must allocate enough memory for spare+2*sizeof(int) */
+ /* for ecc results from device. */
+- struct yaffs_NANDSpare nspare;
++ struct yaffs_nand_spare nspare;
+
+ memset(&nspare, 0, sizeof(nspare));
+
+- retVal = dev->readChunkFromNAND(dev, chunkInNAND, data,
+- (yaffs_Spare *) &nspare);
+- memcpy(spare, &nspare, sizeof(yaffs_Spare));
++ retVal = dev->param.read_chunk_fn(dev, nand_chunk, data,
++ (yaffs_spare *) &nspare);
++ memcpy(spare, &nspare, sizeof(yaffs_spare));
+ if (data && doErrorCorrection) {
+ if (nspare.eccres1 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error fix performed on chunk %d:0"
+- TENDSTR), chunkInNAND));
++ TENDSTR), nand_chunk));
+ } else if (nspare.eccres1 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error unfixed on chunk %d:0"
+- TENDSTR), chunkInNAND));
++ TENDSTR), nand_chunk));
+ }
+
+ if (nspare.eccres2 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error fix performed on chunk %d:1"
+- TENDSTR), chunkInNAND));
++ TENDSTR), nand_chunk));
+ } else if (nspare.eccres2 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error unfixed on chunk %d:1"
+- TENDSTR), chunkInNAND));
++ TENDSTR), nand_chunk));
+ }
+
+ if (nspare.eccres1 || nspare.eccres2) {
+ /* We had a data problem on this page */
+- yaffs_HandleReadDataError(dev, chunkInNAND);
++ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (nspare.eccres1 < 0 || nspare.eccres2 < 0)
+- *eccResult = YAFFS_ECC_RESULT_UNFIXED;
++ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (nspare.eccres1 > 0 || nspare.eccres2 > 0)
+- *eccResult = YAFFS_ECC_RESULT_FIXED;
++ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+- *eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ }
+ }
+@@ -300,17 +298,17 @@ static int yaffs_ReadChunkFromNAND(struc
+ }
+
+ #ifdef NOTYET
+-static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND)
++static int yaffs_check_chunk_erased(struct yaffs_dev_s *dev,
++ int nand_chunk)
+ {
+ static int init;
+ static __u8 cmpbuf[YAFFS_BYTES_PER_CHUNK];
+ static __u8 data[YAFFS_BYTES_PER_CHUNK];
+ /* Might as well always allocate the larger size for */
+- /* dev->useNANDECC == true; */
+- static __u8 spare[sizeof(struct yaffs_NANDSpare)];
++ /* dev->param.use_nand_ecc == true; */
++ static __u8 spare[sizeof(struct yaffs_nand_spare)];
+
+- dev->readChunkFromNAND(dev, chunkInNAND, data, (yaffs_Spare *) spare);
++ dev->param.read_chunk_fn(dev, nand_chunk, data, (yaffs_spare *) spare);
+
+ if (!init) {
+ memset(cmpbuf, 0xff, YAFFS_BYTES_PER_CHUNK);
+@@ -331,14 +329,14 @@ static int yaffs_CheckChunkErased(struct
+ * Functions for robustisizing
+ */
+
+-static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND)
++static void yaffs_handle_rd_data_error(yaffs_dev_t *dev, int nand_chunk)
+ {
+- int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
++ int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+ /* Mark the block for retirement */
+- yaffs_GetBlockInfo(dev, blockInNAND + dev->blockOffset)->needsRetiring = 1;
++ yaffs_get_block_info(dev, flash_block + dev->block_offset)->needs_retiring = 1;
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("**>>Block %d marked for retirement" TENDSTR), blockInNAND));
++ (TSTR("**>>Block %d marked for retirement" TENDSTR), flash_block));
+
+ /* TODO:
+ * Just do a garbage collection on the affected block
+@@ -348,44 +346,44 @@ static void yaffs_HandleReadDataError(ya
+ }
+
+ #ifdef NOTYET
+-static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND)
++static void yaffs_check_written_block(yaffs_dev_t *dev, int nand_chunk)
+ {
+ }
+
+-static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++static void yaffs_handle_chunk_wr_ok(yaffs_dev_t *dev, int nand_chunk,
+ const __u8 *data,
+- const yaffs_Spare *spare)
++ const yaffs_spare *spare)
+ {
+ }
+
+-static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
+- const yaffs_Spare *spare)
++static void yaffs_handle_chunk_update(yaffs_dev_t *dev, int nand_chunk,
++ const yaffs_spare *spare)
+ {
+ }
+
+-static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND)
++static void yaffs_handle_chunk_wr_error(yaffs_dev_t *dev, int nand_chunk)
+ {
+- int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
++ int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+ /* Mark the block for retirement */
+- yaffs_GetBlockInfo(dev, blockInNAND)->needsRetiring = 1;
++ yaffs_get_block_info(dev, flash_block)->needs_retiring = 1;
+ /* Delete the chunk */
+- yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ }
+
+-static int yaffs_VerifyCompare(const __u8 *d0, const __u8 *d1,
+- const yaffs_Spare *s0, const yaffs_Spare *s1)
++static int yaffs_verify_cmp(const __u8 *d0, const __u8 *d1,
++ const yaffs_spare *s0, const yaffs_spare *s1)
+ {
+
+ if (memcmp(d0, d1, YAFFS_BYTES_PER_CHUNK) != 0 ||
+- s0->tagByte0 != s1->tagByte0 ||
+- s0->tagByte1 != s1->tagByte1 ||
+- s0->tagByte2 != s1->tagByte2 ||
+- s0->tagByte3 != s1->tagByte3 ||
+- s0->tagByte4 != s1->tagByte4 ||
+- s0->tagByte5 != s1->tagByte5 ||
+- s0->tagByte6 != s1->tagByte6 ||
+- s0->tagByte7 != s1->tagByte7 ||
++ s0->tb0 != s1->tb0 ||
++ s0->tb1 != s1->tb1 ||
++ s0->tb2 != s1->tb2 ||
++ s0->tb3 != s1->tb3 ||
++ s0->tb4 != s1->tb4 ||
++ s0->tb5 != s1->tb5 ||
++ s0->tb6 != s1->tb6 ||
++ s0->tb7 != s1->tb7 ||
+ s0->ecc1[0] != s1->ecc1[0] ||
+ s0->ecc1[1] != s1->ecc1[1] ||
+ s0->ecc1[2] != s1->ecc1[2] ||
+@@ -398,53 +396,53 @@ static int yaffs_VerifyCompare(const __u
+ }
+ #endif /* NOTYET */
+
+-int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND,
++int yaffs_tags_compat_wr(yaffs_dev_t *dev,
++ int nand_chunk,
+ const __u8 *data,
+- const yaffs_ExtendedTags *eTags)
++ const yaffs_ext_tags *eTags)
+ {
+- yaffs_Spare spare;
+- yaffs_Tags tags;
++ yaffs_spare spare;
++ yaffs_tags_t tags;
+
+- yaffs_SpareInitialise(&spare);
++ yaffs_spare_init(&spare);
+
+- if (eTags->chunkDeleted)
+- spare.pageStatus = 0;
++ if (eTags->is_deleted)
++ spare.page_status = 0;
+ else {
+- tags.objectId = eTags->objectId;
+- tags.chunkId = eTags->chunkId;
++ tags.obj_id = eTags->obj_id;
++ tags.chunk_id = eTags->chunk_id;
+
+- tags.byteCountLSB = eTags->byteCount & 0x3ff;
++ tags.n_bytes_lsb = eTags->n_bytes & 0x3ff;
+
+- if (dev->nDataBytesPerChunk >= 1024)
+- tags.byteCountMSB = (eTags->byteCount >> 10) & 3;
++ if (dev->data_bytes_per_chunk >= 1024)
++ tags.n_bytes_msb = (eTags->n_bytes >> 10) & 3;
+ else
+- tags.byteCountMSB = 3;
++ tags.n_bytes_msb = 3;
+
+
+- tags.serialNumber = eTags->serialNumber;
++ tags.serial_number = eTags->serial_number;
+
+- if (!dev->useNANDECC && data)
+- yaffs_CalcECC(data, &spare);
++ if (!dev->param.use_nand_ecc && data)
++ yaffs_calc_ecc(data, &spare);
+
+- yaffs_LoadTagsIntoSpare(&spare, &tags);
++ yaffs_load_tags_to_spare(&spare, &tags);
+
+ }
+
+- return yaffs_WriteChunkToNAND(dev, chunkInNAND, data, &spare);
++ return yaffs_wr_nand(dev, nand_chunk, data, &spare);
+ }
+
+-int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev,
+- int chunkInNAND,
++int yaffs_tags_compat_rd(yaffs_dev_t *dev,
++ int nand_chunk,
+ __u8 *data,
+- yaffs_ExtendedTags *eTags)
++ yaffs_ext_tags *eTags)
+ {
+
+- yaffs_Spare spare;
+- yaffs_Tags tags;
+- yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_UNKNOWN;
++ yaffs_spare spare;
++ yaffs_tags_t tags;
++ yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+
+- static yaffs_Spare spareFF;
++ static yaffs_spare spareFF;
+ static int init;
+
+ if (!init) {
+@@ -452,33 +450,33 @@ int yaffs_TagsCompatabilityReadChunkWith
+ init = 1;
+ }
+
+- if (yaffs_ReadChunkFromNAND
+- (dev, chunkInNAND, data, &spare, &eccResult, 1)) {
++ if (yaffs_rd_chunk_nand
++ (dev, nand_chunk, data, &spare, &ecc_result, 1)) {
+ /* eTags may be NULL */
+ if (eTags) {
+
+ int deleted =
+- (yaffs_CountBits(spare.pageStatus) < 7) ? 1 : 0;
++ (yaffs_count_bits(spare.page_status) < 7) ? 1 : 0;
+
+- eTags->chunkDeleted = deleted;
+- eTags->eccResult = eccResult;
+- eTags->blockBad = 0; /* We're reading it */
++ eTags->is_deleted = deleted;
++ eTags->ecc_result = ecc_result;
++ eTags->block_bad = 0; /* We're reading it */
+ /* therefore it is not a bad block */
+- eTags->chunkUsed =
++ eTags->chunk_used =
+ (memcmp(&spareFF, &spare, sizeof(spareFF)) !=
+ 0) ? 1 : 0;
+
+- if (eTags->chunkUsed) {
+- yaffs_GetTagsFromSpare(dev, &spare, &tags);
++ if (eTags->chunk_used) {
++ yaffs_get_tags_from_spare(dev, &spare, &tags);
+
+- eTags->objectId = tags.objectId;
+- eTags->chunkId = tags.chunkId;
+- eTags->byteCount = tags.byteCountLSB;
++ eTags->obj_id = tags.obj_id;
++ eTags->chunk_id = tags.chunk_id;
++ eTags->n_bytes = tags.n_bytes_lsb;
+
+- if (dev->nDataBytesPerChunk >= 1024)
+- eTags->byteCount |= (((unsigned) tags.byteCountMSB) << 10);
++ if (dev->data_bytes_per_chunk >= 1024)
++ eTags->n_bytes |= (((unsigned) tags.n_bytes_msb) << 10);
+
+- eTags->serialNumber = tags.serialNumber;
++ eTags->serial_number = tags.serial_number;
+ }
+ }
+
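The compatibility tags only have room for a 10-bit byte count plus a 2-bit extension, which is why the write path stores n_bytes & 0x3ff and (n_bytes >> 10) & 3 in separate fields and the read path above ORs the high part back in when the chunk size is 1024 bytes or more. A minimal sketch of that split, using an illustrative bit-field layout rather than the real yaffs_tags_t:

#include <stdio.h>

/* Illustrative layout: 10 low bits plus a 2-bit extension, enough for
 * byte counts up to 4095. */
struct compat_len { unsigned n_bytes_lsb:10; unsigned n_bytes_msb:2; };

static void pack_len(struct compat_len *t, unsigned n)
{
    t->n_bytes_lsb = n & 0x3ff;
    t->n_bytes_msb = (n >> 10) & 3;
}

static unsigned unpack_len(const struct compat_len *t)
{
    return t->n_bytes_lsb | ((unsigned)t->n_bytes_msb << 10);
}

int main(void)
{
    struct compat_len t;

    pack_len(&t, 2048);
    printf("%u\n", unpack_len(&t));   /* 2048 */
    return 0;
}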
+@@ -488,49 +486,49 @@ int yaffs_TagsCompatabilityReadChunkWith
+ }
+ }
+
+-int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
+- int blockInNAND)
++int yaffs_tags_compat_mark_bad(struct yaffs_dev_s *dev,
++ int flash_block)
+ {
+
+- yaffs_Spare spare;
++ yaffs_spare spare;
+
+- memset(&spare, 0xff, sizeof(yaffs_Spare));
++ memset(&spare, 0xff, sizeof(yaffs_spare));
+
+- spare.blockStatus = 'Y';
++ spare.block_status = 'Y';
+
+- yaffs_WriteChunkToNAND(dev, blockInNAND * dev->nChunksPerBlock, NULL,
++ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
+ &spare);
+- yaffs_WriteChunkToNAND(dev, blockInNAND * dev->nChunksPerBlock + 1,
++ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
+ NULL, &spare);
+
+ return YAFFS_OK;
+
+ }
+
+-int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
+- int blockNo,
+- yaffs_BlockState *state,
+- __u32 *sequenceNumber)
++int yaffs_tags_compat_query_block(struct yaffs_dev_s *dev,
++ int block_no,
++ yaffs_block_state_t *state,
++ __u32 *seq_number)
+ {
+
+- yaffs_Spare spare0, spare1;
+- static yaffs_Spare spareFF;
++ yaffs_spare spare0, spare1;
++ static yaffs_spare spareFF;
+ static int init;
+- yaffs_ECCResult dummy;
++ yaffs_ecc_result dummy;
+
+ if (!init) {
+ memset(&spareFF, 0xFF, sizeof(spareFF));
+ init = 1;
+ }
+
+- *sequenceNumber = 0;
++ *seq_number = 0;
+
+- yaffs_ReadChunkFromNAND(dev, blockNo * dev->nChunksPerBlock, NULL,
++ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block, NULL,
+ &spare0, &dummy, 1);
+- yaffs_ReadChunkFromNAND(dev, blockNo * dev->nChunksPerBlock + 1, NULL,
++ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1, NULL,
+ &spare1, &dummy, 1);
+
+- if (yaffs_CountBits(spare0.blockStatus & spare1.blockStatus) < 7)
++ if (yaffs_count_bits(spare0.block_status & spare1.block_status) < 7)
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ else if (memcmp(&spareFF, &spare0, sizeof(spareFF)) == 0)
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+--- a/fs/yaffs2/yaffs_tagscompat.h
++++ b/fs/yaffs2/yaffs_tagscompat.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -17,23 +17,23 @@
+ #define __YAFFS_TAGSCOMPAT_H__
+
+ #include "yaffs_guts.h"
+-int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND,
++int yaffs_tags_compat_wr(yaffs_dev_t *dev,
++ int nand_chunk,
+ const __u8 *data,
+- const yaffs_ExtendedTags *tags);
+-int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev,
+- int chunkInNAND,
++ const yaffs_ext_tags *tags);
++int yaffs_tags_compat_rd(yaffs_dev_t *dev,
++ int nand_chunk,
+ __u8 *data,
+- yaffs_ExtendedTags *tags);
+-int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
+- int blockNo);
+-int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
+- int blockNo,
+- yaffs_BlockState *state,
+- __u32 *sequenceNumber);
++ yaffs_ext_tags *tags);
++int yaffs_tags_compat_mark_bad(struct yaffs_dev_s *dev,
++ int block_no);
++int yaffs_tags_compat_query_block(struct yaffs_dev_s *dev,
++ int block_no,
++ yaffs_block_state_t *state,
++ __u32 *seq_number);
+
+-void yaffs_CalcTagsECC(yaffs_Tags *tags);
+-int yaffs_CheckECCOnTags(yaffs_Tags *tags);
+-int yaffs_CountBits(__u8 byte);
++void yaffs_calc_tags_ecc(yaffs_tags_t *tags);
++int yaffs_check_tags_ecc(yaffs_tags_t *tags);
++int yaffs_count_bits(__u8 byte);
+
+ #endif
+--- a/fs/yaffs2/yaffs_tagsvalidity.c
++++ b/fs/yaffs2/yaffs_tagsvalidity.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -13,16 +13,16 @@
+
+ #include "yaffs_tagsvalidity.h"
+
+-void yaffs_InitialiseTags(yaffs_ExtendedTags *tags)
++void yaffs_init_tags(yaffs_ext_tags *tags)
+ {
+- memset(tags, 0, sizeof(yaffs_ExtendedTags));
+- tags->validMarker0 = 0xAAAAAAAA;
+- tags->validMarker1 = 0x55555555;
++ memset(tags, 0, sizeof(yaffs_ext_tags));
++ tags->validity1 = 0xAAAAAAAA;
++ tags->validty1 = 0x55555555;
+ }
+
+-int yaffs_ValidateTags(yaffs_ExtendedTags *tags)
++int yaffs_validate_tags(yaffs_ext_tags *tags)
+ {
+- return (tags->validMarker0 == 0xAAAAAAAA &&
+- tags->validMarker1 == 0x55555555);
++ return (tags->validity1 == 0xAAAAAAAA &&
++ tags->validty1 == 0x55555555);
+
+ }
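The two magic words written by yaffs_init_tags() act as a cheap canary: a tags structure that never went through initialisation, or that has been overwritten, is very unlikely to carry both values, so yaffs_validate_tags() can reject it. The same pattern in standalone form, with illustrative field names:

#include <stdio.h>
#include <string.h>

struct tags {
    unsigned marker0;
    int obj_id;           /* payload fields sit between the markers */
    unsigned marker1;
};

/* Zero the structure and stamp a magic word at each end. */
static void init_tags(struct tags *t)
{
    memset(t, 0, sizeof(*t));
    t->marker0 = 0xAAAAAAAA;
    t->marker1 = 0x55555555;
}

static int tags_valid(const struct tags *t)
{
    return t->marker0 == 0xAAAAAAAA && t->marker1 == 0x55555555;
}

int main(void)
{
    struct tags t;

    init_tags(&t);
    printf("after init: %d\n", tags_valid(&t));     /* 1 */

    memset(&t, 0, sizeof(t));                       /* clobbered */
    printf("after clobber: %d\n", tags_valid(&t));  /* 0 */
    return 0;
}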
+--- a/fs/yaffs2/yaffs_tagsvalidity.h
++++ b/fs/yaffs2/yaffs_tagsvalidity.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -19,6 +19,6 @@
+
+ #include "yaffs_guts.h"
+
+-void yaffs_InitialiseTags(yaffs_ExtendedTags *tags);
+-int yaffs_ValidateTags(yaffs_ExtendedTags *tags);
++void yaffs_init_tags(yaffs_ext_tags *tags);
++int yaffs_validate_tags(yaffs_ext_tags *tags);
+ #endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_trace.h
+@@ -0,0 +1,60 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++
++#ifndef __YTRACE_H__
++#define __YTRACE_H__
++
++extern unsigned int yaffs_trace_mask;
++extern unsigned int yaffs_wr_attempts;
++
++/*
++ * Tracing flags.
++ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
++ */
++
++#define YAFFS_TRACE_OS 0x00000002
++#define YAFFS_TRACE_ALLOCATE 0x00000004
++#define YAFFS_TRACE_SCAN 0x00000008
++#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
++#define YAFFS_TRACE_ERASE 0x00000020
++#define YAFFS_TRACE_GC 0x00000040
++#define YAFFS_TRACE_WRITE 0x00000080
++#define YAFFS_TRACE_TRACING 0x00000100
++#define YAFFS_TRACE_DELETION 0x00000200
++#define YAFFS_TRACE_BUFFERS 0x00000400
++#define YAFFS_TRACE_NANDACCESS 0x00000800
++#define YAFFS_TRACE_GC_DETAIL 0x00001000
++#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
++#define YAFFS_TRACE_MTD 0x00004000
++#define YAFFS_TRACE_CHECKPOINT 0x00008000
++
++#define YAFFS_TRACE_VERIFY 0x00010000
++#define YAFFS_TRACE_VERIFY_NAND 0x00020000
++#define YAFFS_TRACE_VERIFY_FULL 0x00040000
++#define YAFFS_TRACE_VERIFY_ALL 0x000F0000
++
++#define YAFFS_TRACE_SYNC 0x00100000
++#define YAFFS_TRACE_BACKGROUND 0x00200000
++#define YAFFS_TRACE_LOCK 0x00400000
++
++#define YAFFS_TRACE_ERROR 0x40000000
++#define YAFFS_TRACE_BUG 0x80000000
++#define YAFFS_TRACE_ALWAYS 0xF0000000
++
++
++#define T(mask, p) do { if ((mask) & (yaffs_trace_mask | YAFFS_TRACE_ALWAYS)) TOUT(p); } while (0)
++
++#endif
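Everything in the new yaffs_trace.h funnels through the T() macro at the bottom: a message is emitted only when its mask overlaps the runtime yaffs_trace_mask, and the bits in YAFFS_TRACE_ALWAYS can never be masked off. A user-space sketch of the same gate, with printf standing in for the TOUT()/TSTR() wrappers that yportenv.h provides in the real code (the flag values below are the ones from the header above):

#include <stdio.h>

#define TRACE_SCAN   0x00000008u
#define TRACE_WRITE  0x00000080u
#define TRACE_ERROR  0x40000000u
#define TRACE_ALWAYS 0xF0000000u

static unsigned int trace_mask = TRACE_SCAN;   /* adjustable at runtime */

/* Emit p only if its mask overlaps the enabled bits; the ALWAYS bits
 * cannot be switched off, mirroring the T() macro added above. */
#define T(mask, p) \
    do { if ((mask) & (trace_mask | TRACE_ALWAYS)) printf p; } while (0)

int main(void)
{
    T(TRACE_SCAN,  ("scanning block %d\n", 7));  /* printed: enabled     */
    T(TRACE_WRITE, ("write trace\n"));           /* dropped: masked off  */
    T(TRACE_ERROR, ("error trace\n"));           /* printed: ALWAYS bit  */
    return 0;
}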
+--- /dev/null
++++ b/fs/yaffs2/yaffs_verify.c
+@@ -0,0 +1,626 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++
++#include "yaffs_verify.h"
++#include "yaffs_trace.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_nand.h"
++
++int yaffs_skip_verification(yaffs_dev_t *dev)
++{
++ dev=dev;
++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_skip_full_verification(yaffs_dev_t *dev)
++{
++ dev=dev;
++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_skip_nand_verification(yaffs_dev_t *dev)
++{
++ dev=dev;
++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
++}
++
++
++static const char *block_stateName[] = {
++"Unknown",
++"Needs scanning",
++"Scanning",
++"Empty",
++"Allocating",
++"Full",
++"Dirty",
++"Checkpoint",
++"Collecting",
++"Dead"
++};
++
++
++void yaffs_verify_blk(yaffs_dev_t *dev, yaffs_block_info_t *bi, int n)
++{
++ int actuallyUsed;
++ int inUse;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Report illegal runtime states */
++ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has undefined state %d"TENDSTR), n, bi->block_state));
++
++ switch (bi->block_state) {
++ case YAFFS_BLOCK_STATE_UNKNOWN:
++ case YAFFS_BLOCK_STATE_SCANNING:
++ case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has bad run-state %s"TENDSTR),
++ n, block_stateName[bi->block_state]));
++ }
++
++ /* Check pages in use and soft deletions are legal */
++
++ actuallyUsed = bi->pages_in_use - bi->soft_del_pages;
++
++ if (bi->pages_in_use < 0 || bi->pages_in_use > dev->param.chunks_per_block ||
++ bi->soft_del_pages < 0 || bi->soft_del_pages > dev->param.chunks_per_block ||
++ actuallyUsed < 0 || actuallyUsed > dev->param.chunks_per_block)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has illegal values pages_in_use %d soft_del_pages %d"TENDSTR),
++ n, bi->pages_in_use, bi->soft_del_pages));
++
++
++ /* Check chunk bitmap legal */
++ inUse = yaffs_count_chunk_bits(dev, n);
++ if (inUse != bi->pages_in_use)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has inconsistent values pages_in_use %d counted chunk bits %d"TENDSTR),
++ n, bi->pages_in_use, inUse));
++
++}
++
++
++
++void yaffs_verify_collected_blk(yaffs_dev_t *dev, yaffs_block_info_t *bi, int n)
++{
++ yaffs_verify_blk(dev, bi, n);
++
++ /* After collection the block should be in the erased state */
++
++ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
++ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
++ T(YAFFS_TRACE_ERROR, (TSTR("Block %d is in state %d after gc, should be erased"TENDSTR),
++ n, bi->block_state));
++ }
++}
++
++void yaffs_verify_blocks(yaffs_dev_t *dev)
++{
++ int i;
++ int nBlocksPerState[YAFFS_NUMBER_OF_BLOCK_STATES];
++ int nIllegalBlockStates = 0;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ memset(nBlocksPerState, 0, sizeof(nBlocksPerState));
++
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, i);
++ yaffs_verify_blk(dev, bi, i);
++
++ if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
++ nBlocksPerState[bi->block_state]++;
++ else
++ nIllegalBlockStates++;
++ }
++
++ T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block summary"TENDSTR)));
++
++ T(YAFFS_TRACE_VERIFY, (TSTR("%d blocks have illegal states"TENDSTR), nIllegalBlockStates));
++ if (nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Too many allocating blocks"TENDSTR)));
++
++ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("%s %d blocks"TENDSTR),
++ block_stateName[i], nBlocksPerState[i]));
++
++ if (dev->blocks_in_checkpt != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Checkpoint block count wrong dev %d count %d"TENDSTR),
++ dev->blocks_in_checkpt, nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT]));
++
++ if (dev->n_erased_blocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Erased block count wrong dev %d count %d"TENDSTR),
++ dev->n_erased_blocks, nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY]));
++
++ if (nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Too many collecting blocks %d (max is 1)"TENDSTR),
++ nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING]));
++
++ T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
++
++}
++
++/*
++ * Verify the object header. oh must be valid, but obj and tags may be NULL in which
++ * case those tests will not be performed.
++ */
++void yaffs_verify_oh(yaffs_obj_t *obj, yaffs_obj_header *oh, yaffs_ext_tags *tags, int parentCheck)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ if (!(tags && obj && oh)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Verifying object header tags %p obj %p oh %p"TENDSTR),
++ tags, obj, oh));
++ return;
++ }
++
++ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
++ oh->type > YAFFS_OBJECT_TYPE_MAX)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR),
++ tags->obj_id, oh->type));
++
++ if (tags->obj_id != obj->obj_id)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch obj_id %d"TENDSTR),
++ tags->obj_id, obj->obj_id));
++
++
++ /*
++ * Check that the object's parent ids match if parentCheck requested.
++ *
++ * Tests do not apply to the root object.
++ */
++
++ if (parentCheck && tags->obj_id > 1 && !obj->parent)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch parent_id %d obj->parent is NULL"TENDSTR),
++ tags->obj_id, oh->parent_obj_id));
++
++ if (parentCheck && obj->parent &&
++ oh->parent_obj_id != obj->parent->obj_id &&
++ (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
++ obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch parent_id %d parent_obj_id %d"TENDSTR),
++ tags->obj_id, oh->parent_obj_id, obj->parent->obj_id));
++
++ if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header name is NULL"TENDSTR),
++ obj->obj_id));
++
++ if (tags->obj_id > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header name is 0xFF"TENDSTR),
++ obj->obj_id));
++}
++
++
++#if 0
++/* Not being used, but don't want to throw away yet */
++int yaffs_verify_tnode_worker(yaffs_obj_t *obj, yaffs_tnode_t *tn,
++ __u32 level, int chunk_offset)
++{
++ int i;
++ yaffs_dev_t *dev = obj->my_dev;
++ int ok = 1;
++
++ if (tn) {
++ if (level > 0) {
++
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
++ if (tn->internal[i]) {
++ ok = yaffs_verify_tnode_worker(obj,
++ tn->internal[i],
++ level - 1,
++ (chunk_offset<<YAFFS_TNODES_INTERNAL_BITS) + i);
++ }
++ }
++ } else if (level == 0) {
++ yaffs_ext_tags tags;
++ __u32 obj_id = obj->obj_id;
++
++ chunk_offset <<= YAFFS_TNODES_LEVEL0_BITS;
++
++ for (i = 0; i < YAFFS_NTNODES_LEVEL0; i++) {
++ __u32 theChunk = yaffs_get_group_base(dev, tn, i);
++
++ if (theChunk > 0) {
++ /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),tags.obj_id,tags.chunk_id,theChunk)); */
++ yaffs_rd_chunk_tags_nand(dev, theChunk, NULL, &tags);
++ if (tags.obj_id != obj_id || tags.chunk_id != chunk_offset) {
++ T(~0, (TSTR("Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++ obj_id, chunk_offset, theChunk,
++ tags.obj_id, tags.chunk_id));
++ }
++ }
++ chunk_offset++;
++ }
++ }
++ }
++
++ return ok;
++
++}
++
++#endif
++
++void yaffs_verify_file(yaffs_obj_t *obj)
++{
++ int requiredTallness;
++ int actualTallness;
++ __u32 lastChunk;
++ __u32 x;
++ __u32 i;
++ yaffs_dev_t *dev;
++ yaffs_ext_tags tags;
++ yaffs_tnode_t *tn;
++ __u32 obj_id;
++
++ if (!obj)
++ return;
++
++ if (yaffs_skip_verification(obj->my_dev))
++ return;
++
++ dev = obj->my_dev;
++ obj_id = obj->obj_id;
++
++ /* Check file size is consistent with tnode depth */
++ lastChunk = obj->variant.file_variant.file_size / dev->data_bytes_per_chunk + 1;
++ x = lastChunk >> YAFFS_TNODES_LEVEL0_BITS;
++ requiredTallness = 0;
++ while (x > 0) {
++ x >>= YAFFS_TNODES_INTERNAL_BITS;
++ requiredTallness++;
++ }
++
++ actualTallness = obj->variant.file_variant.top_level;
++
++ /* Check that the chunks in the tnode tree are all correct.
++ * We do this by scanning through the tnode tree and
++ * checking the tags for every chunk match.
++ */
++
++ if (yaffs_skip_nand_verification(dev))
++ return;
++
++ for (i = 1; i <= lastChunk; i++) {
++ tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
++
++ if (tn) {
++ __u32 theChunk = yaffs_get_group_base(dev, tn, i);
++ if (theChunk > 0) {
++ /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),obj_id,i,theChunk)); */
++ yaffs_rd_chunk_tags_nand(dev, theChunk, NULL, &tags);
++ if (tags.obj_id != obj_id || tags.chunk_id != i) {
++ T(~0, (TSTR("Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++ obj_id, i, theChunk,
++ tags.obj_id, tags.chunk_id));
++ }
++ }
++ }
++ }
++}
++
++
++void yaffs_verify_link(yaffs_obj_t *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ /* Verify sane equivalent object */
++}
++
++void yaffs_verify_symlink(yaffs_obj_t *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ /* Verify symlink string */
++}
++
++void yaffs_verify_special(yaffs_obj_t *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++}
++
++void yaffs_verify_obj(yaffs_obj_t *obj)
++{
++ yaffs_dev_t *dev;
++
++ __u32 chunkMin;
++ __u32 chunkMax;
++
++ __u32 chunk_idOk;
++ __u32 chunkInRange;
++ __u32 chunkShouldNotBeDeleted;
++ __u32 chunkValid;
++
++ if (!obj)
++ return;
++
++ if (obj->being_created)
++ return;
++
++ dev = obj->my_dev;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Check sane object header chunk */
++
++ chunkMin = dev->internal_start_block * dev->param.chunks_per_block;
++ chunkMax = (dev->internal_end_block+1) * dev->param.chunks_per_block - 1;
++
++ chunkInRange = (((unsigned)(obj->hdr_chunk)) >= chunkMin && ((unsigned)(obj->hdr_chunk)) <= chunkMax);
++ chunk_idOk = chunkInRange || (obj->hdr_chunk == 0);
++ chunkValid = chunkInRange &&
++ yaffs_check_chunk_bit(dev,
++ obj->hdr_chunk / dev->param.chunks_per_block,
++ obj->hdr_chunk % dev->param.chunks_per_block);
++ chunkShouldNotBeDeleted = chunkInRange && !chunkValid;
++
++ if (!obj->fake &&
++ (!chunk_idOk || chunkShouldNotBeDeleted)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d has chunk_id %d %s %s"TENDSTR),
++ obj->obj_id, obj->hdr_chunk,
++ chunk_idOk ? "" : ",out of range",
++ chunkShouldNotBeDeleted ? ",marked as deleted" : ""));
++ }
++
++ if (chunkValid && !yaffs_skip_nand_verification(dev)) {
++ yaffs_ext_tags tags;
++ yaffs_obj_header *oh;
++ __u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
++
++ oh = (yaffs_obj_header *)buffer;
++
++ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer,
++ &tags);
++
++ yaffs_verify_oh(obj, oh, &tags, 1);
++
++ yaffs_release_temp_buffer(dev, buffer, __LINE__);
++ }
++
++ /* Verify it has a parent */
++ if (obj && !obj->fake &&
++ (!obj->parent || obj->parent->my_dev != dev)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR),
++ obj->obj_id, obj->parent));
++ }
++
++ /* Verify parent is a directory */
++ if (obj->parent && obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR),
++ obj->obj_id, obj->parent->variant_type));
++ }
++
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ yaffs_verify_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ yaffs_verify_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ yaffs_verify_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ yaffs_verify_link(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ yaffs_verify_special(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ default:
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d has illegal type %d"TENDSTR),
++ obj->obj_id, obj->variant_type));
++ break;
++ }
++}
++
++void yaffs_verify_objects(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj;
++ int i;
++ struct ylist_head *lh;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Iterate through the objects in each hash entry */
++
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ ylist_for_each(lh, &dev->obj_bucket[i].list) {
++ if (lh) {
++ obj = ylist_entry(lh, yaffs_obj_t, hash_link);
++ yaffs_verify_obj(obj);
++ }
++ }
++ }
++}
++
++
++void yaffs_verify_obj_in_dir(yaffs_obj_t *obj)
++{
++ struct ylist_head *lh;
++ yaffs_obj_t *listObj;
++
++ int count = 0;
++
++ if (!obj) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("No object to verify" TENDSTR)));
++ YBUG();
++ return;
++ }
++
++ if (yaffs_skip_verification(obj->my_dev))
++ return;
++
++ if (!obj->parent) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object does not have parent" TENDSTR)));
++ YBUG();
++ return;
++ }
++
++ if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Parent is not directory" TENDSTR)));
++ YBUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ ylist_for_each(lh, &obj->parent->variant.dir_variant.children) {
++ if (lh) {
++ listObj = ylist_entry(lh, yaffs_obj_t, siblings);
++ yaffs_verify_obj(listObj);
++ if (obj == listObj)
++ count++;
++ }
++ }
++
++ if (count != 1) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory %d times" TENDSTR), count));
++ YBUG();
++ }
++}
++
++void yaffs_verify_dir(yaffs_obj_t *directory)
++{
++ struct ylist_head *lh;
++ yaffs_obj_t *listObj;
++
++ if (!directory) {
++ YBUG();
++ return;
++ }
++
++ if (yaffs_skip_full_verification(directory->my_dev))
++ return;
++
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Directory has wrong type: %d" TENDSTR), directory->variant_type));
++ YBUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ ylist_for_each(lh, &directory->variant.dir_variant.children) {
++ if (lh) {
++ listObj = ylist_entry(lh, yaffs_obj_t, siblings);
++ if (listObj->parent != directory) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory list has wrong parent %p" TENDSTR), listObj->parent));
++ YBUG();
++ }
++ yaffs_verify_obj_in_dir(listObj);
++ }
++ }
++}
++
++static int yaffs_free_verification_failures;
++
++void yaffs_verify_free_chunks(yaffs_dev_t *dev)
++{
++ int counted;
++ int difference;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ counted = yaffs_count_free_chunks(dev);
++
++ difference = dev->n_free_chunks - counted;
++
++ if (difference) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("Freechunks verification failure %d %d %d" TENDSTR),
++ dev->n_free_chunks, counted, difference));
++ yaffs_free_verification_failures++;
++ }
++}
++
++int yaffs_verify_file_sane(yaffs_obj_t *in)
++{
++#if 0
++ int chunk;
++ int n_chunks;
++ int fSize;
++ int failed = 0;
++ int obj_id;
++ yaffs_tnode_t *tn;
++ yaffs_tags_t localTags;
++ yaffs_tags_t *tags = &localTags;
++ int theChunk;
++ int is_deleted;
++
++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
++ return YAFFS_FAIL;
++
++ obj_id = in->obj_id;
++ fSize = in->variant.file_variant.file_size;
++ n_chunks =
++ (fSize + in->my_dev->data_bytes_per_chunk - 1) / in->my_dev->data_bytes_per_chunk;
++
++ for (chunk = 1; chunk <= n_chunks; chunk++) {
++ tn = yaffs_find_tnode_0(in->my_dev, &in->variant.file_variant,
++ chunk);
++
++ if (tn) {
++
++ theChunk = yaffs_get_group_base(dev, tn, chunk);
++
++ if (yaffs_check_chunk_bits
++ (dev, theChunk / dev->param.chunks_per_block,
++ theChunk % dev->param.chunks_per_block)) {
++
++ yaffs_rd_chunk_tags_nand(in->my_dev, theChunk,
++ tags,
++ &is_deleted);
++ if (yaffs_tags_match
++ (tags, in->obj_id, chunk, is_deleted)) {
++ /* found it; */
++
++ }
++ } else {
++
++ failed = 1;
++ }
++
++ } else {
++ /* T(("No level 0 found for %d\n", chunk)); */
++ }
++ }
++
++ return failed ? YAFFS_FAIL : YAFFS_OK;
++#else
++ in=in;
++ return YAFFS_OK;
++#endif
++}
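Most checks in the new yaffs_verify.c share one shape: recount something directly from the underlying data and compare it with the counter the driver maintains incrementally, as in blocks-per-state versus dev->n_erased_blocks and dev->blocks_in_checkpt, counted chunk bits versus bi->pages_in_use, and counted free chunks versus dev->n_free_chunks. A stripped-down sketch of that recount-and-compare pattern:

#include <stdio.h>

enum state { EMPTY, FULL, DIRTY, NSTATES };

struct dev {
    enum state blocks[6];
    int n_empty;            /* maintained incrementally by the driver */
};

/* Recount from the authoritative data and report any drift, the same
 * style of check yaffs_verify_blocks() and yaffs_verify_free_chunks()
 * perform. */
static void verify_counts(const struct dev *d)
{
    int per_state[NSTATES] = { 0 };
    int i;

    for (i = 0; i < 6; i++)
        per_state[d->blocks[i]]++;

    if (per_state[EMPTY] != d->n_empty)
        printf("empty block count wrong: dev %d counted %d\n",
               d->n_empty, per_state[EMPTY]);
}

int main(void)
{
    struct dev d = { { EMPTY, FULL, EMPTY, DIRTY, FULL, EMPTY }, 2 };

    verify_counts(&d);      /* reports dev 2 vs counted 3 */
    return 0;
}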
+--- /dev/null
++++ b/fs/yaffs2/yaffs_verify.h
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __YAFFS_VERIFY_H__
++#define __YAFFS_VERIFY_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_verify_blk(yaffs_dev_t *dev, yaffs_block_info_t *bi, int n);
++void yaffs_verify_collected_blk(yaffs_dev_t *dev, yaffs_block_info_t *bi, int n);
++void yaffs_verify_blocks(yaffs_dev_t *dev);
++
++void yaffs_verify_oh(yaffs_obj_t *obj, yaffs_obj_header *oh, yaffs_ext_tags *tags, int parentCheck);
++void yaffs_verify_file(yaffs_obj_t *obj);
++void yaffs_verify_link(yaffs_obj_t *obj);
++void yaffs_verify_symlink(yaffs_obj_t *obj);
++void yaffs_verify_special(yaffs_obj_t *obj);
++void yaffs_verify_obj(yaffs_obj_t *obj);
++void yaffs_verify_objects(yaffs_dev_t *dev);
++void yaffs_verify_obj_in_dir(yaffs_obj_t *obj);
++void yaffs_verify_dir(yaffs_obj_t *directory);
++void yaffs_verify_free_chunks(yaffs_dev_t *dev);
++
++int yaffs_verify_file_sane(yaffs_obj_t *obj);
++
++int yaffs_skip_verification(yaffs_dev_t *dev);
++
++#endif
++
+--- /dev/null
++++ b/fs/yaffs2/yaffs_vfs_glue.c
+@@ -0,0 +1,3576 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ * Acknowledgements:
++ * Luc van OostenRyck for numerous patches.
++ * Nick Bane for numerous patches.
++ * Nick Bane for 2.5/2.6 integration.
++ * Andras Toth for mknod rdev issue.
++ * Michael Fischer for finding the problem with inode inconsistency.
++ * Some code bodily lifted from JFFS
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ *
++ * This is the file system front-end to YAFFS that hooks it up to
++ * the VFS.
++ *
++ * Special notes:
++ * >> 2.4: sb->u.generic_sbp points to the yaffs_dev_t associated with
++ * this superblock
++ * >> 2.6: sb->s_fs_info points to the yaffs_dev_t associated with this
++ * superblock
++ * >> inode->u.generic_ip points to the associated yaffs_obj_t.
++ */
++
++/*
++ * There are two variants of the VFS glue code. This variant should compile
++ * for any version of Linux.
++ */
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10))
++#define YAFFS_COMPILE_BACKGROUND
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6, 23))
++#define YAFFS_COMPILE_FREEZER
++#endif
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
++#define YAFFS_COMPILE_EXPORTFS
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35))
++#define YAFFS_USE_SETATTR_COPY
++#define YAFFS_USE_TRUNCATE_SETSIZE
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35))
++#define YAFFS_HAS_EVICT_INODE
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
++#define YAFFS_NEW_FOLLOW_LINK 1
++#else
++#define YAFFS_NEW_FOLLOW_LINK 0
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++#include <linux/config.h>
++#endif
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/smp_lock.h>
++#include <linux/pagemap.h>
++#include <linux/mtd/mtd.h>
++#include <linux/interrupt.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++#include <linux/namei.h>
++#endif
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++#include <linux/exportfs.h>
++#endif
++
++#ifdef YAFFS_COMPILE_BACKGROUND
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#endif
++#ifdef YAFFS_COMPILE_FREEZER
++#include <linux/freezer.h>
++#endif
++
++#include <asm/div64.h>
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++#include <linux/statfs.h>
++
++#define UnlockPage(p) unlock_page(p)
++#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
++
++/* FIXME: use sb->s_id instead ? */
++#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
++
++#else
++
++#include <linux/locks.h>
++#define BDEVNAME_SIZE 0
++#define yaffs_devname(sb, buf) kdevname(sb->s_dev)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
++/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
++#define __user
++#endif
++
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
++#define YPROC_ROOT (&proc_root)
++#else
++#define YPROC_ROOT NULL
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
++#define Y_INIT_TIMER(a) init_timer(a)
++#else
++#define Y_INIT_TIMER(a) init_timer_on_stack(a)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++#define WRITE_SIZE_STR "writesize"
++#define WRITE_SIZE(mtd) ((mtd)->writesize)
++#else
++#define WRITE_SIZE_STR "oobblock"
++#define WRITE_SIZE(mtd) ((mtd)->oobblock)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
++#define YAFFS_USE_WRITE_BEGIN_END 1
++#else
++#define YAFFS_USE_WRITE_BEGIN_END 0
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
++static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
++{
++ uint64_t result = partition_size;
++ do_div(result, block_size);
++ return (uint32_t)result;
++}
++#else
++#define YCALCBLOCKS(s, b) ((s)/(b))
++#endif
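The long run of #if blocks in this file all use the same recipe for keeping one source file buildable from 2.4-era kernels up to current ones: LINUX_VERSION_CODE is compared against KERNEL_VERSION(major, minor, patch), which packs the triple into a single integer so an ordinary comparison reads as "at least version X.Y.Z". A tiny user-space illustration of the idiom, using the write_begin/write_end versus prepare_write/commit_write switch from this file as the example:

#include <stdio.h>

/* Same packing as include/linux/version.h of this era:
 * (major << 16) + (minor << 8) + patch. */
#define KVER(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
    unsigned int running = KVER(3, 3, 0);   /* stand-in for LINUX_VERSION_CODE */

    if (running > KVER(2, 6, 27))
        printf("build the write_begin/write_end address_space ops\n");
    else
        printf("build the prepare_write/commit_write ops\n");
    return 0;
}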
++
++#include <linux/uaccess.h>
++#include <linux/mtd/mtd.h>
++
++#include "yportenv.h"
++#include "yaffs_trace.h"
++#include "yaffs_guts.h"
++
++#include "yaffs_linux.h"
++
++#include "yaffs_mtdif.h"
++#include "yaffs_mtdif1.h"
++#include "yaffs_mtdif2.h"
++
++unsigned int yaffs_trace_mask = YAFFS_TRACE_BAD_BLOCKS | YAFFS_TRACE_ALWAYS;
++unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
++unsigned int yaffs_auto_checkpoint = 1;
++unsigned int yaffs_gc_control = 1;
++unsigned int yaffs_bg_enable = 1;
++
++/* Module Parameters */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++module_param(yaffs_trace_mask, uint, 0644);
++module_param(yaffs_wr_attempts, uint, 0644);
++module_param(yaffs_auto_checkpoint, uint, 0644);
++module_param(yaffs_gc_control, uint, 0644);
++module_param(yaffs_bg_enable, uint, 0644);
++#else
++MODULE_PARM(yaffs_trace_mask, "i");
++MODULE_PARM(yaffs_wr_attempts, "i");
++MODULE_PARM(yaffs_auto_checkpoint, "i");
++MODULE_PARM(yaffs_gc_control, "i");
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
++/* use iget and read_inode */
++#define Y_IGET(sb, inum) iget((sb), (inum))
++static void yaffs_read_inode(struct inode *inode);
++
++#else
++/* Call local equivalent */
++#define YAFFS_USE_OWN_IGET
++#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino);
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++#define yaffs_InodeToObjectLV(iptr) ((iptr)->i_private)
++#else
++#define yaffs_InodeToObjectLV(iptr) ((iptr)->u.generic_ip)
++#endif
++
++#define yaffs_InodeToObject(iptr) ((yaffs_obj_t *)(yaffs_InodeToObjectLV(iptr)))
++#define yaffs_dentry_to_obj(dptr) yaffs_InodeToObject((dptr)->d_inode)
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#define yaffs_SuperToDevice(sb) ((yaffs_dev_t *)sb->s_fs_info)
++#else
++#define yaffs_SuperToDevice(sb) ((yaffs_dev_t *)sb->u.generic_sbp)
++#endif
++
++
++#define update_dir_time(dir) do {\
++ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
++ } while(0)
++
++static void yaffs_put_super(struct super_block *sb);
++
++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
++ loff_t *pos);
++static ssize_t yaffs_hold_space(struct file *f);
++static void yaffs_release_space(struct file *f);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_file_flush(struct file *file, fl_owner_t id);
++#else
++static int yaffs_file_flush(struct file *file);
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++static int yaffs_sync_object(struct file *file, int datasync);
++#else
++static int yaffs_sync_object(struct file *file, struct dentry *dentry,
++ int datasync);
++#endif
++
++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
++ struct nameidata *n);
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++ struct nameidata *n);
++#else
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode);
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry);
++#endif
++static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
++ struct dentry *dentry);
++static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
++static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
++ const char *symname);
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ dev_t dev);
++#else
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ int dev);
++#endif
++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
++ struct inode *new_dir, struct dentry *new_dentry);
++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_sync_fs(struct super_block *sb, int wait);
++static void yaffs_write_super(struct super_block *sb);
++#else
++static int yaffs_sync_fs(struct super_block *sb);
++static int yaffs_write_super(struct super_block *sb);
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf);
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf);
++#else
++static int yaffs_statfs(struct super_block *sb, struct statfs *buf);
++#endif
++
++#ifdef YAFFS_HAS_PUT_INODE
++static void yaffs_put_inode(struct inode *inode);
++#endif
++
++#ifdef YAFFS_HAS_EVICT_INODE
++static void yaffs_evict_inode(struct inode *);
++#else
++static void yaffs_delete_inode(struct inode *);
++static void yaffs_clear_inode(struct inode *);
++#endif
++
++static int yaffs_readpage(struct file *file, struct page *page);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_writepage(struct page *page, struct writeback_control *wbc);
++#else
++static int yaffs_writepage(struct page *page);
++#endif
++
++#ifdef CONFIG_YAFFS_XATTR
++int yaffs_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags);
++ssize_t yaffs_getxattr(struct dentry *dentry, const char *name, void *buff,
++ size_t size);
++int yaffs_removexattr(struct dentry *dentry, const char *name);
++ssize_t yaffs_listxattr(struct dentry *dentry, char *buff, size_t size);
++#endif
++
++
++#if (YAFFS_USE_WRITE_BEGIN_END != 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned flags,
++ struct page **pagep, void **fsdata);
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned copied,
++ struct page *pg, void *fsdadata);
++#else
++static int yaffs_prepare_write(struct file *f, struct page *pg,
++ unsigned offset, unsigned to);
++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
++ unsigned to);
++
++#endif
++
++static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
++ int buflen);
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias);
++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
++#else
++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
++#endif
++
++static void yaffs_touch_super(yaffs_dev_t *dev);
++
++static loff_t yaffs_dir_llseek(struct file *file, loff_t offset, int origin);
++
++static int yaffs_vfs_setattr(struct inode *, struct iattr *);
++
++
++static struct address_space_operations yaffs_file_address_operations = {
++ .readpage = yaffs_readpage,
++ .writepage = yaffs_writepage,
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++ .write_begin = yaffs_write_begin,
++ .write_end = yaffs_write_end,
++#else
++ .prepare_write = yaffs_prepare_write,
++ .commit_write = yaffs_commit_write,
++#endif
++};
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
++static const struct file_operations yaffs_file_operations = {
++ .read = do_sync_read,
++ .write = do_sync_write,
++ .aio_read = generic_file_aio_read,
++ .aio_write = generic_file_aio_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++ .splice_read = generic_file_splice_read,
++ .splice_write = generic_file_splice_write,
++ .llseek = generic_file_llseek,
++};
++
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++
++static const struct file_operations yaffs_file_operations = {
++ .read = do_sync_read,
++ .write = do_sync_write,
++ .aio_read = generic_file_aio_read,
++ .aio_write = generic_file_aio_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++ .sendfile = generic_file_sendfile,
++};
++
++#else
++
++static const struct file_operations yaffs_file_operations = {
++ .read = generic_file_read,
++ .write = generic_file_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ .sendfile = generic_file_sendfile,
++#endif
++};
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
++static void zero_user_segment(struct page *page, unsigned start, unsigned end)
++{
++ void * kaddr = kmap_atomic(page, KM_USER0);
++ memset(kaddr + start, 0, end - start);
++ kunmap_atomic(kaddr, KM_USER0);
++ flush_dcache_page(page);
++}
++#endif
++
++
++static const struct inode_operations yaffs_file_inode_operations = {
++ .setattr = yaffs_setattr,
++#ifdef CONFIG_YAFFS_XATTR
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++#endif
++};
++
++static const struct inode_operations yaffs_symlink_inode_operations = {
++ .readlink = yaffs_readlink,
++ .follow_link = yaffs_follow_link,
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++ .put_link = yaffs_put_link,
++#endif
++ .setattr = yaffs_setattr,
++#ifdef CONFIG_YAFFS_XATTR
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++#endif
++};
++
++static const struct inode_operations yaffs_dir_inode_operations = {
++ .create = yaffs_create,
++ .lookup = yaffs_lookup,
++ .link = yaffs_link,
++ .unlink = yaffs_unlink,
++ .symlink = yaffs_symlink,
++ .mkdir = yaffs_mkdir,
++ .rmdir = yaffs_unlink,
++ .mknod = yaffs_mknod,
++ .rename = yaffs_rename,
++ .setattr = yaffs_setattr,
++#ifdef CONFIG_YAFFS_XATTR
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++#endif
++};
++
++static const struct file_operations yaffs_dir_operations = {
++ .read = generic_read_dir,
++ .readdir = yaffs_readdir,
++ .fsync = yaffs_sync_object,
++ .llseek = yaffs_dir_llseek,
++};
++
++static const struct super_operations yaffs_super_ops = {
++ .statfs = yaffs_statfs,
++
++#ifndef YAFFS_USE_OWN_IGET
++ .read_inode = yaffs_read_inode,
++#endif
++#ifdef YAFFS_HAS_PUT_INODE
++ .put_inode = yaffs_put_inode,
++#endif
++ .put_super = yaffs_put_super,
++#ifdef YAFFS_HAS_EVICT_INODE
++ .evict_inode = yaffs_evict_inode,
++#else
++ .delete_inode = yaffs_delete_inode,
++ .clear_inode = yaffs_clear_inode,
++#endif
++ .sync_fs = yaffs_sync_fs,
++ .write_super = yaffs_write_super,
++};
++
++
++static int yaffs_vfs_setattr(struct inode *inode, struct iattr *attr)
++{
++#ifdef YAFFS_USE_SETATTR_COPY
++ setattr_copy(inode,attr);
++ return 0;
++#else
++ return inode_setattr(inode, attr);
++#endif
++
++}
++
++static int yaffs_vfs_setsize(struct inode *inode, loff_t newsize)
++{
++#ifdef YAFFS_USE_TRUNCATE_SETSIZE
++ truncate_setsize(inode,newsize);
++ return 0;
++#else
++ truncate_inode_pages(&inode->i_data,newsize);
++ return 0;
++#endif
++
++}
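++
++/* Note: the two wrappers above isolate the version-dependent VFS helpers --
++ * setattr_copy()/truncate_setsize() when YAFFS_USE_SETATTR_COPY and
++ * YAFFS_USE_TRUNCATE_SETSIZE are defined, inode_setattr()/
++ * truncate_inode_pages() otherwise -- so callers such as yaffs_setattr()
++ * below do not have to repeat the #ifdefs.
++ */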
++
++static unsigned yaffs_gc_control_callback(yaffs_dev_t *dev)
++{
++ return yaffs_gc_control;
++}
++
++static void yaffs_gross_lock(yaffs_dev_t *dev)
++{
++ T(YAFFS_TRACE_LOCK, (TSTR("yaffs locking %p\n"), current));
++ down(&(yaffs_dev_to_lc(dev)->grossLock));
++ T(YAFFS_TRACE_LOCK, (TSTR("yaffs locked %p\n"), current));
++}
++
++static void yaffs_gross_unlock(yaffs_dev_t *dev)
++{
++ T(YAFFS_TRACE_LOCK, (TSTR("yaffs unlocking %p\n"), current));
++ up(&(yaffs_dev_to_lc(dev)->grossLock));
++}
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++
++static struct inode *
++yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino, uint32_t generation)
++{
++ return Y_IGET(sb, ino);
++}
++
++static struct dentry *
++yaffs2_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type)
++{
++ return generic_fh_to_dentry(sb, fid, fh_len, fh_type, yaffs2_nfs_get_inode);
++}
++
++static struct dentry *
++yaffs2_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type)
++{
++ return generic_fh_to_parent(sb, fid, fh_len, fh_type, yaffs2_nfs_get_inode);
++}
++
++struct dentry *yaffs2_get_parent(struct dentry *dentry)
++{
++
++ struct super_block *sb = dentry->d_inode->i_sb;
++ struct dentry *parent = ERR_PTR(-ENOENT);
++ struct inode *inode;
++ unsigned long parent_ino;
++ yaffs_obj_t *d_obj;
++ yaffs_obj_t *parent_obj;
++
++ d_obj = yaffs_InodeToObject(dentry->d_inode);
++
++ if (d_obj) {
++ parent_obj = d_obj->parent;
++ if (parent_obj) {
++ parent_ino = yaffs_get_obj_inode(parent_obj);
++ inode = Y_IGET(sb, parent_ino);
++
++ if (IS_ERR(inode)) {
++ parent = ERR_CAST(inode);
++ } else {
++ parent = d_obtain_alias(inode);
++ /* d_obtain_alias() returns an ERR_PTR and drops the
++ * inode reference itself on failure, so no further
++ * error handling (and no iput()) is needed here.
++ */
++ }
++ }
++ }
++
++ return parent;
++}
++
++/* Fields left NULL (zero) in this structure fall back to the
++ * default exportfs implementations, so only the hooks we
++ * override are filled in.
++ */
++
++static struct export_operations yaffs_export_ops =
++{
++ .fh_to_dentry = yaffs2_fh_to_dentry,
++ .fh_to_parent = yaffs2_fh_to_parent,
++ .get_parent = yaffs2_get_parent,
++};
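++
++/* Note: these hooks are what lets knfsd turn an NFS file handle back into a
++ * dentry: generic_fh_to_dentry()/generic_fh_to_parent() decode the handle
++ * and call yaffs2_nfs_get_inode(), which maps the stored object id straight
++ * to an inode via Y_IGET(); yaffs2_get_parent() walks up the yaffs object
++ * tree instead.
++ */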
++
++#endif
++
++/*-----------------------------------------------------------------*/
++/* Directory search context allows us to unlock access to yaffs during
++ * filldir without causing problems with the directory being modified.
++ * This is similar to the tried and tested mechanism used in yaffs direct.
++ *
++ * A search context iterates along a doubly linked list of siblings in the
++ * directory. If the iterating object is deleted then this would corrupt
++ * the list iteration, likely causing a crash. The search context avoids
++ * this by using the remove_obj_fn to move the search context to the
++ * next object before the object is deleted.
++ *
++ * Many readdirs (and thus search contexts) may be alive simultaneously, so
++ * each yaffs_dev_t has a list of these.
++ *
++ * A search context lives for the duration of a readdir.
++ *
++ * All these functions must be called while yaffs is locked.
++ */
++
++struct yaffs_SearchContext {
++ yaffs_dev_t *dev;
++ yaffs_obj_t *dirObj;
++ yaffs_obj_t *nextReturn;
++ struct ylist_head others;
++};
++
++/*
++ * yaffs_NewSearch() creates a new search context, initialises it and
++ * adds it to the device's search context list.
++ *
++ * Called at start of readdir.
++ */
++static struct yaffs_SearchContext * yaffs_NewSearch(yaffs_obj_t *dir)
++{
++ yaffs_dev_t *dev = dir->my_dev;
++ struct yaffs_SearchContext *sc = YMALLOC(sizeof(struct yaffs_SearchContext));
++ if(sc){
++ sc->dirObj = dir;
++ sc->dev = dev;
++ if( ylist_empty(&sc->dirObj->variant.dir_variant.children))
++ sc->nextReturn = NULL;
++ else
++ sc->nextReturn = ylist_entry(
++ dir->variant.dir_variant.children.next,
++ yaffs_obj_t,siblings);
++ YINIT_LIST_HEAD(&sc->others);
++ ylist_add(&sc->others,&(yaffs_dev_to_lc(dev)->searchContexts));
++ }
++ return sc;
++}
++
++/*
++ * yaffs_search_end() disposes of a search context and cleans up.
++ */
++static void yaffs_search_end(struct yaffs_SearchContext * sc)
++{
++ if(sc){
++ ylist_del(&sc->others);
++ YFREE(sc);
++ }
++}
++
++/*
++ * yaffs_search_advance() moves a search context to the next object.
++ * Called when the search iterates or when an object removal causes
++ * the search context to be moved to the next object.
++ */
++static void yaffs_search_advance(struct yaffs_SearchContext *sc)
++{
++ if(!sc)
++ return;
++
++ if( sc->nextReturn == NULL ||
++ ylist_empty(&sc->dirObj->variant.dir_variant.children))
++ sc->nextReturn = NULL;
++ else {
++ struct ylist_head *next = sc->nextReturn->siblings.next;
++
++ if( next == &sc->dirObj->variant.dir_variant.children)
++ sc->nextReturn = NULL; /* end of list */
++ else
++ sc->nextReturn = ylist_entry(next,yaffs_obj_t,siblings);
++ }
++}
++
++/*
++ * yaffs_remove_obj_callback() is called when an object is unlinked.
++ * We check open search contexts and advance any which are currently
++ * on the object being iterated.
++ */
++static void yaffs_remove_obj_callback(yaffs_obj_t *obj)
++{
++
++ struct ylist_head *i;
++ struct yaffs_SearchContext *sc;
++ struct ylist_head *search_contexts = &(yaffs_dev_to_lc(obj->my_dev)->searchContexts);
++
++
++ /* Iterate through the directory search contexts.
++ * If any are currently on the object being removed, then advance
++ * the search context to the next object to prevent a hanging pointer.
++ */
++ ylist_for_each(i, search_contexts) {
++ if (i) {
++ sc = ylist_entry(i, struct yaffs_SearchContext,others);
++ if(sc->nextReturn == obj)
++ yaffs_search_advance(sc);
++ }
++ }
++
++}
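++
++/* Illustrative sketch (not part of the yaffs code itself): a caller iterates
++ * a directory with a search context roughly like this, holding the gross
++ * lock as required above; handle_entry() stands in for whatever the caller
++ * does with each object (yaffs_readdir() below drops the lock and calls
++ * filldir() at that point):
++ *
++ *     sc = yaffs_NewSearch(dir_obj);
++ *     if (!sc)
++ *             return -ENOMEM;
++ *     while (sc->nextReturn) {
++ *             handle_entry(sc->nextReturn);
++ *             yaffs_search_advance(sc);
++ *     }
++ *     yaffs_search_end(sc);
++ */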
++
++
++/*-----------------------------------------------------------------*/
++
++static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
++ int buflen)
++{
++ unsigned char *alias;
++ int ret;
++
++ yaffs_dev_t *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
++
++ yaffs_gross_unlock(dev);
++
++ if (!alias)
++ return -ENOMEM;
++
++ ret = vfs_readlink(dentry, buffer, buflen, alias);
++ kfree(alias);
++ return ret;
++}
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++#else
++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++#endif
++{
++ unsigned char *alias;
++ int ret;
++ yaffs_dev_t *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
++ yaffs_gross_unlock(dev);
++
++ if (!alias) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++ nd_set_link(nd, alias);
++ /* Return the alias as the cookie handed to yaffs_put_link();
++ * round-tripping the pointer through an int would truncate it
++ * on 64-bit kernels.
++ */
++ return alias;
++out:
++ return ERR_PTR(ret);
++#else
++ ret = vfs_follow_link(nd, alias);
++ kfree(alias);
++out:
++ return ret;
++#endif
++}
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias)
++{
++ kfree(alias);
++}
++#endif
++
++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
++ yaffs_obj_t *obj);
++
++/*
++ * Lookup is used to find objects in the fs
++ */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++ struct nameidata *n)
++#else
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
++#endif
++{
++ yaffs_obj_t *obj;
++ struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */
++
++ yaffs_dev_t *dev = yaffs_InodeToObject(dir)->my_dev;
++
++ if(current != yaffs_dev_to_lc(dev)->readdirProcess)
++ yaffs_gross_lock(dev);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_lookup for %d:%s\n"),
++ yaffs_InodeToObject(dir)->obj_id, dentry->d_name.name));
++
++ obj = yaffs_find_by_name(yaffs_InodeToObject(dir),
++ dentry->d_name.name);
++
++ obj = yaffs_get_equivalent_obj(obj); /* in case it was a hardlink */
++
++ /* Can't hold gross lock when calling yaffs_get_inode() */
++ if(current != yaffs_dev_to_lc(dev)->readdirProcess)
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_lookup found %d\n"), obj->obj_id));
++
++ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++
++ if (inode) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_lookup dentry\n")));
++/* #if 0 asserted by NCB for 2.5/6 compatibility - falls through to
++ * d_add even if NULL inode */
++#if 0
++ /*dget(dentry); // try to solve directory bug */
++ d_add(dentry, inode);
++
++ /* return dentry; */
++ return NULL;
++#endif
++ }
++
++ } else {
++ T(YAFFS_TRACE_OS,(TSTR("yaffs_lookup not found\n")));
++
++ }
++
++/* added NCB for 2.5/6 compatibility - forces add even if inode is
++ * NULL, which creates the dentry hash */
++ d_add(dentry, inode);
++
++ return NULL;
++}
++
++
++#ifdef YAFFS_HAS_PUT_INODE
++
++/* For now put inode is just for debugging
++ * Put inode is called when the inode **structure** is put.
++ */
++static void yaffs_put_inode(struct inode *inode)
++{
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_put_inode: ino %d, count %d\n"), (int)inode->i_ino,
++ atomic_read(&inode->i_count)));
++
++}
++#endif
++
++
++static void yaffs_unstitch_obj(struct inode *inode, yaffs_obj_t *obj)
++{
++ /* Clear the association between the inode and
++ * the yaffs_obj_t.
++ */
++ obj->my_inode = NULL;
++ yaffs_InodeToObjectLV(inode) = NULL;
++
++ /* If the object freeing was deferred, then the real
++ * free happens now.
++ * This should fix the inode inconsistency problem.
++ */
++ yaffs_handle_defered_free(obj);
++}
++
++#ifdef YAFFS_HAS_EVICT_INODE
++/* yaffs_evict_inode combines into one operation what was previously done in
++ * yaffs_clear_inode() and yaffs_delete_inode()
++ *
++ */
++static void yaffs_evict_inode( struct inode *inode)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++ int deleteme = 0;
++
++ obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_evict_inode: ino %d, count %d %s\n"), (int)inode->i_ino,
++ atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object"));
++
++ if (!inode->i_nlink && !is_bad_inode(inode))
++ deleteme = 1;
++ truncate_inode_pages(&inode->i_data,0);
++ end_writeback(inode);
++
++ if(deleteme && obj){
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_del_obj(obj);
++ yaffs_gross_unlock(dev);
++ }
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_unstitch_obj(inode,obj);
++ yaffs_gross_unlock(dev);
++ }
++
++
++}
++#else
++
++/* yaffs_clear_inode() is called to tell the fs to release any per-inode
++ * data it holds. Either the object still exists on flash and the inode is
++ * merely being dropped from the cache, or the object has actually been
++ * deleted and we are being called via the chain
++ * yaffs_delete_inode() -> clear_inode() -> yaffs_clear_inode().
++ */
++
++static void yaffs_clear_inode(struct inode *inode)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++
++ obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_clear_inode: ino %d, count %d %s\n"), (int)inode->i_ino,
++ atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object"));
++
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_unstitch_obj(inode,obj);
++ yaffs_gross_unlock(dev);
++ }
++
++}
++
++/* delete is called when the link count is zero and the inode
++ * is put (ie. nobody wants to know about it anymore, time to
++ * delete the file).
++ * NB Must call clear_inode()
++ */
++static void yaffs_delete_inode(struct inode *inode)
++{
++ yaffs_obj_t *obj = yaffs_InodeToObject(inode);
++ yaffs_dev_t *dev;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_delete_inode: ino %d, count %d %s\n"), (int)inode->i_ino,
++ atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object"));
++
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_del_obj(obj);
++ yaffs_gross_unlock(dev);
++ }
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++ truncate_inode_pages(&inode->i_data, 0);
++#endif
++ clear_inode(inode);
++}
++#endif
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_file_flush(struct file *file, fl_owner_t id)
++#else
++static int yaffs_file_flush(struct file *file)
++#endif
++{
++ yaffs_obj_t *obj = yaffs_dentry_to_obj(file->f_dentry);
++
++ yaffs_dev_t *dev = obj->my_dev;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_file_flush object %d (%s)\n"), obj->obj_id,
++ obj->dirty ? "dirty" : "clean"));
++
++ yaffs_gross_lock(dev);
++
++ yaffs_flush_file(obj, 1, 0);
++
++ yaffs_gross_unlock(dev);
++
++ return 0;
++}
++
++static int yaffs_readpage_nolock(struct file *f, struct page *pg)
++{
++ /* Lifted from jffs2 */
++
++ yaffs_obj_t *obj;
++ unsigned char *pg_buf;
++ int ret;
++
++ yaffs_dev_t *dev;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_readpage_nolock at %08x, size %08x\n"),
++ (unsigned)(pg->index << PAGE_CACHE_SHIFT),
++ (unsigned)PAGE_CACHE_SIZE));
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ BUG_ON(!PageLocked(pg));
++#else
++ if (!PageLocked(pg))
++ PAGE_BUG(pg);
++#endif
++
++ pg_buf = kmap(pg);
++ /* FIXME: Can kmap fail? */
++
++ yaffs_gross_lock(dev);
++
++ ret = yaffs_file_rd(obj, pg_buf,
++ pg->index << PAGE_CACHE_SHIFT,
++ PAGE_CACHE_SIZE);
++
++ yaffs_gross_unlock(dev);
++
++ if (ret >= 0)
++ ret = 0;
++
++ if (ret) {
++ ClearPageUptodate(pg);
++ SetPageError(pg);
++ } else {
++ SetPageUptodate(pg);
++ ClearPageError(pg);
++ }
++
++ flush_dcache_page(pg);
++ kunmap(pg);
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_readpage_nolock done\n")));
++ return ret;
++}
++
++static int yaffs_readpage_unlock(struct file *f, struct page *pg)
++{
++ int ret = yaffs_readpage_nolock(f, pg);
++ UnlockPage(pg);
++ return ret;
++}
++
++static int yaffs_readpage(struct file *f, struct page *pg)
++{
++ int ret;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_readpage\n")));
++ ret=yaffs_readpage_unlock(f, pg);
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_readpage done\n")));
++ return ret;
++}
++
++/* writepage inspired by/stolen from smbfs */
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
++#else
++static int yaffs_writepage(struct page *page)
++#endif
++{
++ yaffs_dev_t *dev;
++ struct address_space *mapping = page->mapping;
++ struct inode *inode;
++ unsigned long end_index;
++ char *buffer;
++ yaffs_obj_t *obj;
++ int nWritten = 0;
++ unsigned n_bytes;
++ loff_t i_size;
++
++ if (!mapping)
++ BUG();
++ inode = mapping->host;
++ if (!inode)
++ BUG();
++ i_size = i_size_read(inode);
++
++ end_index = i_size >> PAGE_CACHE_SHIFT;
++
++ if(page->index < end_index)
++ n_bytes = PAGE_CACHE_SIZE;
++ else {
++ n_bytes = i_size & (PAGE_CACHE_SIZE -1);
++
++ if (page->index > end_index || !n_bytes) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_writepage at %08x, inode size = %08x!!!\n"),
++ (unsigned)(page->index << PAGE_CACHE_SHIFT),
++ (unsigned)inode->i_size));
++ T(YAFFS_TRACE_OS,
++ (TSTR(" -> don't care!!\n")));
++
++ zero_user_segment(page,0,PAGE_CACHE_SIZE);
++ set_page_writeback(page);
++ unlock_page(page);
++ end_page_writeback(page);
++ return 0;
++ }
++ }
++
++ if(n_bytes != PAGE_CACHE_SIZE)
++ zero_user_segment(page,n_bytes,PAGE_CACHE_SIZE);
++
++ get_page(page);
++
++ buffer = kmap(page);
++
++ obj = yaffs_InodeToObject(inode);
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_writepage at %08x, size %08x\n"),
++ (unsigned)(page->index << PAGE_CACHE_SHIFT), n_bytes));
++ T(YAFFS_TRACE_OS,
++ (TSTR("writepag0: obj = %05x, ino = %05x\n"),
++ (int)obj->variant.file_variant.file_size, (int)inode->i_size));
++
++ nWritten = yaffs_wr_file(obj, buffer,
++ page->index << PAGE_CACHE_SHIFT, n_bytes, 0);
++
++ yaffs_touch_super(dev);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("writepag1: obj = %05x, ino = %05x\n"),
++ (int)obj->variant.file_variant.file_size, (int)inode->i_size));
++
++ yaffs_gross_unlock(dev);
++
++ kunmap(page);
++ set_page_writeback(page);
++ unlock_page(page);
++ end_page_writeback(page);
++ put_page(page);
++
++ return (nWritten == n_bytes) ? 0 : -ENOSPC;
++}
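++
++/* Worked example (illustrative): with PAGE_CACHE_SIZE == 4096 and
++ * i_size == 5000, end_index is 1. Page index 0 is below end_index and is
++ * written with n_bytes = 4096; page index 1 is the partial tail page with
++ * n_bytes = 5000 & 4095 = 904, so bytes 904..4095 are zeroed before
++ * yaffs_wr_file() is called; any higher page index lies wholly beyond EOF
++ * and is completed without being written to flash.
++ */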
++
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned flags,
++ struct page **pagep, void **fsdata)
++{
++ struct page *pg = NULL;
++ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
++
++ int ret = 0;
++ int space_held = 0;
++
++ /* Get a page */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ pg = grab_cache_page_write_begin(mapping, index, flags);
++#else
++ pg = __grab_cache_page(mapping, index);
++#endif
++
++ *pagep = pg;
++ if (!pg) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ T(YAFFS_TRACE_OS,
++ (TSTR("start yaffs_write_begin index %d(%x) uptodate %d\n"),
++ (int)index,(int)index,Page_Uptodate(pg) ? 1 : 0));
++
++ /* Get fs space */
++ space_held = yaffs_hold_space(filp);
++
++ if (!space_held) {
++ ret = -ENOSPC;
++ goto out;
++ }
++
++ /* Update page if required */
++
++ if (!Page_Uptodate(pg))
++ ret = yaffs_readpage_nolock(filp, pg);
++
++ if (ret)
++ goto out;
++
++ /* Happy path return */
++ T(YAFFS_TRACE_OS, (TSTR("end yaffs_write_begin - ok\n")));
++
++ return 0;
++
++out:
++ T(YAFFS_TRACE_OS,
++ (TSTR("end yaffs_write_begin fail returning %d\n"), ret));
++ if (space_held)
++ yaffs_release_space(filp);
++ if (pg) {
++ unlock_page(pg);
++ page_cache_release(pg);
++ }
++ return ret;
++}
++
++#else
++
++static int yaffs_prepare_write(struct file *f, struct page *pg,
++ unsigned offset, unsigned to)
++{
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_prepare_write\n")));
++
++ if (!Page_Uptodate(pg))
++ return yaffs_readpage_nolock(f, pg);
++ return 0;
++}
++#endif
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned copied,
++ struct page *pg, void *fsdadata)
++{
++ int ret = 0;
++ void *addr, *kva;
++ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
++
++ kva = kmap(pg);
++ addr = kva + offset_into_page;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_write_end addr %p pos %x n_bytes %d\n"),
++ addr, (unsigned)pos, copied));
++
++ ret = yaffs_file_write(filp, addr, copied, &pos);
++
++ if (ret != copied) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_write_end not same size ret %d copied %d\n"),
++ ret, copied));
++ SetPageError(pg);
++ } else {
++ /* Nothing */
++ }
++
++ kunmap(pg);
++
++ yaffs_release_space(filp);
++ unlock_page(pg);
++ page_cache_release(pg);
++ return ret;
++}
++#else
++
++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
++ unsigned to)
++{
++ void *addr, *kva;
++
++ loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
++ int n_bytes = to - offset;
++ int nWritten;
++
++ unsigned spos = pos;
++ unsigned saddr;
++
++ kva = kmap(pg);
++ addr = kva + offset;
++
++ saddr = (unsigned) addr;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_commit_write addr %x pos %x n_bytes %d\n"),
++ saddr, spos, n_bytes));
++
++ nWritten = yaffs_file_write(f, addr, n_bytes, &pos);
++
++ if (nWritten != n_bytes) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_commit_write not same size nWritten %d n_bytes %d\n"),
++ nWritten, n_bytes));
++ SetPageError(pg);
++ } else {
++ /* Nothing */
++ }
++
++ kunmap(pg);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_commit_write returning %d\n"),
++ nWritten == n_bytes ? 0 : nWritten));
++
++ return nWritten == n_bytes ? 0 : nWritten;
++}
++#endif
++
++
++static void yaffs_fill_inode_from_obj(struct inode *inode, yaffs_obj_t *obj)
++{
++ if (inode && obj) {
++
++
++ /* Check mode against the variant type and attempt to repair if broken. */
++ __u32 mode = obj->yst_mode;
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (!S_ISREG(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFREG;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ if (!S_ISLNK(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFLNK;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ if (!S_ISDIR(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFDIR;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ default:
++ /* TODO? */
++ break;
++ }
++
++ inode->i_flags |= S_NOATIME;
++
++ inode->i_ino = obj->obj_id;
++ inode->i_mode = obj->yst_mode;
++ inode->i_uid = obj->yst_uid;
++ inode->i_gid = obj->yst_gid;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++ inode->i_blksize = inode->i_sb->s_blocksize;
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++ inode->i_rdev = old_decode_dev(obj->yst_rdev);
++ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
++ inode->i_atime.tv_nsec = 0;
++ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
++ inode->i_mtime.tv_nsec = 0;
++ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
++ inode->i_ctime.tv_nsec = 0;
++#else
++ inode->i_rdev = obj->yst_rdev;
++ inode->i_atime = obj->yst_atime;
++ inode->i_mtime = obj->yst_mtime;
++ inode->i_ctime = obj->yst_ctime;
++#endif
++ inode->i_size = yaffs_get_obj_length(obj);
++ inode->i_blocks = (inode->i_size + 511) >> 9;
++
++ inode->i_nlink = yaffs_get_obj_link_count(obj);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_fill_inode mode %x uid %d gid %d size %d count %d\n"),
++ inode->i_mode, inode->i_uid, inode->i_gid,
++ (int)inode->i_size, atomic_read(&inode->i_count)));
++
++ switch (obj->yst_mode & S_IFMT) {
++ default: /* fifo, device or socket */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ init_special_inode(inode, obj->yst_mode,
++ old_decode_dev(obj->yst_rdev));
++#else
++ init_special_inode(inode, obj->yst_mode,
++ (dev_t) (obj->yst_rdev));
++#endif
++ break;
++ case S_IFREG: /* file */
++ inode->i_op = &yaffs_file_inode_operations;
++ inode->i_fop = &yaffs_file_operations;
++ inode->i_mapping->a_ops =
++ &yaffs_file_address_operations;
++ break;
++ case S_IFDIR: /* directory */
++ inode->i_op = &yaffs_dir_inode_operations;
++ inode->i_fop = &yaffs_dir_operations;
++ break;
++ case S_IFLNK: /* symlink */
++ inode->i_op = &yaffs_symlink_inode_operations;
++ break;
++ }
++
++ yaffs_InodeToObjectLV(inode) = obj;
++
++ obj->my_inode = inode;
++
++ } else {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_FileInode invalid parameters\n")));
++ }
++
++}
++
++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
++ yaffs_obj_t *obj)
++{
++ struct inode *inode;
++
++ if (!sb) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_get_inode for NULL super_block!!\n")));
++ return NULL;
++
++ }
++
++ if (!obj) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_get_inode for NULL object!!\n")));
++ return NULL;
++
++ }
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_get_inode for object %d\n"), obj->obj_id));
++
++ inode = Y_IGET(sb, obj->obj_id);
++ if (IS_ERR(inode))
++ return NULL;
++
++ /* NB Side effect: iget calls back to yaffs_read_inode(). */
++ /* iget also increments the inode's i_count */
++ /* NB You can't be holding grossLock or deadlock will happen! */
++
++ return inode;
++}
++
++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
++ loff_t *pos)
++{
++ yaffs_obj_t *obj;
++ int nWritten, ipos;
++ struct inode *inode;
++ yaffs_dev_t *dev;
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ inode = f->f_dentry->d_inode;
++
++ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
++ ipos = inode->i_size;
++ else
++ ipos = *pos;
++
++ if (!obj)
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_file_write: hey obj is null!\n")));
++ else
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_file_write about to write %u(%x) bytes "
++ "to object %d at %d(%x)\n"),
++ (unsigned) n, (unsigned) n, obj->obj_id, ipos,ipos));
++
++ nWritten = yaffs_wr_file(obj, buf, ipos, n, 0);
++
++ yaffs_touch_super(dev);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_file_write: %d(%x) bytes written\n"),
++ (unsigned )n,(unsigned)n));
++
++ if (nWritten > 0) {
++ ipos += nWritten;
++ *pos = ipos;
++ if (ipos > inode->i_size) {
++ inode->i_size = ipos;
++ inode->i_blocks = (ipos + 511) >> 9;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_file_write size updated to %d bytes, "
++ "%d blocks\n"),
++ ipos, (int)(inode->i_blocks)));
++ }
++
++ }
++ yaffs_gross_unlock(dev);
++ return (nWritten == 0) && (n > 0) ? -ENOSPC : nWritten;
++}
++
++/* Space holding and freeing is done to ensure we have space available for write_begin/end */
++/* For now we just assume few parallel writes and check against a small number. */
++/* Todo: need to do this with a counter to handle parallel reads better */
++
++static ssize_t yaffs_hold_space(struct file *f)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++
++ int n_free_chunks;
++
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ n_free_chunks = yaffs_get_n_free_chunks(dev);
++
++ yaffs_gross_unlock(dev);
++
++ return (n_free_chunks > 20) ? 1 : 0;
++}
++
++static void yaffs_release_space(struct file *f)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++
++ yaffs_gross_unlock(dev);
++}
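++
++/* Example (illustrative): with the fixed threshold above, a device reporting
++ * 21 or more free chunks lets yaffs_write_begin() proceed; at 20 or fewer,
++ * yaffs_hold_space() returns 0 and yaffs_write_begin() fails with -ENOSPC,
++ * releasing the page it had already grabbed.
++ */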
++
++
++static loff_t yaffs_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++ long long retval;
++
++ lock_kernel();
++
++ switch (origin){
++ case 2:
++ offset += i_size_read(file->f_path.dentry->d_inode);
++ break;
++ case 1:
++ offset += file->f_pos;
++ }
++ retval = -EINVAL;
++
++ if (offset >= 0){
++ if (offset != file->f_pos)
++ file->f_pos = offset;
++
++ retval = offset;
++ }
++ unlock_kernel();
++ return retval;
++}
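++
++/* Note: origin follows the usual lseek() convention -- 0 (SEEK_SET) leaves
++ * the offset absolute, 1 (SEEK_CUR) adds f_pos and 2 (SEEK_END) adds the
++ * directory's i_size, so, for example, an llseek with offset 0 and origin 2
++ * leaves f_pos at i_size.
++ */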
++
++
++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++ struct yaffs_SearchContext *sc;
++ struct inode *inode = f->f_dentry->d_inode;
++ unsigned long offset, curoffs;
++ yaffs_obj_t *l;
++ int retVal = 0;
++
++ char name[YAFFS_MAX_NAME_LENGTH + 1];
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ yaffs_dev_to_lc(dev)->readdirProcess = current;
++
++ offset = f->f_pos;
++
++ sc = yaffs_NewSearch(obj);
++ if(!sc){
++ retVal = -ENOMEM;
++ goto out;
++ }
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_readdir: starting at %d\n"), (int)offset));
++
++ if (offset == 0) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_readdir: entry . ino %d \n"),
++ (int)inode->i_ino));
++ yaffs_gross_unlock(dev);
++ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0){
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ f->f_pos++;
++ }
++ if (offset == 1) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_readdir: entry .. ino %d \n"),
++ (int)f->f_dentry->d_parent->d_inode->i_ino));
++ yaffs_gross_unlock(dev);
++ if (filldir(dirent, "..", 2, offset,
++ f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0){
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ f->f_pos++;
++ }
++
++ curoffs = 1;
++
++ /* If the directory has changed since the open or last call to
++ readdir, rewind to after the 2 canned entries. */
++ if (f->f_version != inode->i_version) {
++ offset = 2;
++ f->f_pos = offset;
++ f->f_version = inode->i_version;
++ }
++
++ while(sc->nextReturn){
++ curoffs++;
++ l = sc->nextReturn;
++ if (curoffs >= offset) {
++ int this_inode = yaffs_get_obj_inode(l);
++ int this_type = yaffs_get_obj_type(l);
++
++ yaffs_get_obj_name(l, name,
++ YAFFS_MAX_NAME_LENGTH + 1);
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_readdir: %s inode %d\n"),
++ name, yaffs_get_obj_inode(l)));
++
++ yaffs_gross_unlock(dev);
++
++ if (filldir(dirent,
++ name,
++ strlen(name),
++ offset,
++ this_inode,
++ this_type) < 0){
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++
++ yaffs_gross_lock(dev);
++
++ offset++;
++ f->f_pos++;
++ }
++ yaffs_search_advance(sc);
++ }
++
++out:
++ yaffs_search_end(sc);
++ yaffs_dev_to_lc(dev)->readdirProcess = NULL;
++ yaffs_gross_unlock(dev);
++
++ return retVal;
++}
++
++
++
++/*
++ * File creation. Allocate an inode, and we're done.
++ */
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++#define YCRED(x) x
++#else
++#define YCRED(x) (x->cred)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ dev_t rdev)
++#else
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ int rdev)
++#endif
++{
++ struct inode *inode;
++
++ yaffs_obj_t *obj = NULL;
++ yaffs_dev_t *dev;
++
++ yaffs_obj_t *parent = yaffs_InodeToObject(dir);
++
++ int error = -ENOSPC;
++ uid_t uid = YCRED(current)->fsuid;
++ gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
++
++ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
++ mode |= S_ISGID;
++
++ if (parent) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_mknod: parent object %d type %d\n"),
++ parent->obj_id, parent->variant_type));
++ } else {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_mknod: could not get parent object\n")));
++ return -EPERM;
++ }
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making object for %s, "
++ "mode %x dev %x\n"),
++ dentry->d_name.name, mode, rdev));
++
++ dev = parent->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ switch (mode & S_IFMT) {
++ default:
++ /* Special (socket, fifo, device...) */
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making special\n")));
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ obj = yaffs_create_special(parent, dentry->d_name.name, mode, uid,
++ gid, old_encode_dev(rdev));
++#else
++ obj = yaffs_create_special(parent, dentry->d_name.name, mode, uid,
++ gid, rdev);
++#endif
++ break;
++ case S_IFREG: /* file */
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making file\n")));
++ obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
++ gid);
++ break;
++ case S_IFDIR: /* directory */
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_mknod: making directory\n")));
++ obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
++ uid, gid);
++ break;
++ case S_IFLNK: /* symlink */
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making symlink\n")));
++ obj = NULL; /* Do we ever get here? */
++ break;
++ }
++
++ /* Can not call yaffs_get_inode() with gross lock held */
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
++ d_instantiate(dentry, inode);
++ update_dir_time(dir);
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_mknod created object %d count = %d\n"),
++ obj->obj_id, atomic_read(&inode->i_count)));
++ error = 0;
++ yaffs_fill_inode_from_obj(dir,parent);
++ } else {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_mknod failed making object\n")));
++ error = -ENOMEM;
++ }
++
++ return error;
++}
++
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++{
++ int retVal;
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_mkdir\n")));
++ retVal = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
++ return retVal;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
++ struct nameidata *n)
++#else
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
++#endif
++{
++ T(YAFFS_TRACE_OS,(TSTR("yaffs_create\n")));
++ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
++}
++
++static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
++{
++ int retVal;
++
++ yaffs_dev_t *dev;
++ yaffs_obj_t *obj;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_unlink %d:%s\n"),
++ (int)(dir->i_ino),
++ dentry->d_name.name));
++ obj = yaffs_InodeToObject(dir);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ retVal = yaffs_unlinker(obj, dentry->d_name.name);
++
++ if (retVal == YAFFS_OK) {
++ dentry->d_inode->i_nlink--;
++ dir->i_version++;
++ yaffs_gross_unlock(dev);
++ mark_inode_dirty(dentry->d_inode);
++ update_dir_time(dir);
++ return 0;
++ }
++ yaffs_gross_unlock(dev);
++ return -ENOTEMPTY;
++}
++
++/*
++ * Create a link...
++ */
++static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
++ struct dentry *dentry)
++{
++ struct inode *inode = old_dentry->d_inode;
++ yaffs_obj_t *obj = NULL;
++ yaffs_obj_t *link = NULL;
++ yaffs_dev_t *dev;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_link\n")));
++
++ obj = yaffs_InodeToObject(inode);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
++ link = yaffs_link_obj(yaffs_InodeToObject(dir), dentry->d_name.name,
++ obj);
++
++ if (link) {
++ old_dentry->d_inode->i_nlink = yaffs_get_obj_link_count(obj);
++ d_instantiate(dentry, old_dentry->d_inode);
++ atomic_inc(&old_dentry->d_inode->i_count);
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_link link count %d i_count %d\n"),
++ old_dentry->d_inode->i_nlink,
++ atomic_read(&old_dentry->d_inode->i_count)));
++ }
++
++ yaffs_gross_unlock(dev);
++
++ if (link){
++ update_dir_time(dir);
++ return 0;
++ }
++
++ return -EPERM;
++}
++
++static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
++ const char *symname)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++ uid_t uid = YCRED(current)->fsuid;
++ gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_symlink\n")));
++
++ dev = yaffs_InodeToObject(dir)->my_dev;
++ yaffs_gross_lock(dev);
++ obj = yaffs_create_symlink(yaffs_InodeToObject(dir), dentry->d_name.name,
++ S_IFLNK | S_IRWXUGO, uid, gid, symname);
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ struct inode *inode;
++
++ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++ d_instantiate(dentry, inode);
++ update_dir_time(dir);
++ T(YAFFS_TRACE_OS, (TSTR("symlink created OK\n")));
++ return 0;
++ } else {
++ T(YAFFS_TRACE_OS, (TSTR("symlink not created\n")));
++ }
++
++ return -ENOMEM;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++static int yaffs_sync_object(struct file *file, int datasync)
++#else
++static int yaffs_sync_object(struct file *file, struct dentry *dentry,
++ int datasync)
++#endif
++{
++
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++ struct dentry *dentry = file->f_path.dentry;
++#endif
++
++ obj = yaffs_dentry_to_obj(dentry);
++
++ dev = obj->my_dev;
++
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
++ (TSTR("yaffs_sync_object\n")));
++ yaffs_gross_lock(dev);
++ yaffs_flush_file(obj, 1, datasync);
++ yaffs_gross_unlock(dev);
++ return 0;
++}
++
++/*
++ * The VFS layer already does all the dentry stuff for rename.
++ *
++ * NB: POSIX says you can rename an object over an old object of the same name
++ */
++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
++ struct inode *new_dir, struct dentry *new_dentry)
++{
++ yaffs_dev_t *dev;
++ int retVal = YAFFS_FAIL;
++ yaffs_obj_t *target;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_rename\n")));
++ dev = yaffs_InodeToObject(old_dir)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ /* Check if the target is an existing directory that is not empty. */
++ target = yaffs_find_by_name(yaffs_InodeToObject(new_dir),
++ new_dentry->d_name.name);
++
++
++
++ if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ !ylist_empty(&target->variant.dir_variant.children)) {
++
++ T(YAFFS_TRACE_OS, (TSTR("target is non-empty dir\n")));
++
++ retVal = YAFFS_FAIL;
++ } else {
++ /* Now does unlinking internally using shadowing mechanism */
++ T(YAFFS_TRACE_OS, (TSTR("calling yaffs_rename_obj\n")));
++
++ retVal = yaffs_rename_obj(yaffs_InodeToObject(old_dir),
++ old_dentry->d_name.name,
++ yaffs_InodeToObject(new_dir),
++ new_dentry->d_name.name);
++ }
++ yaffs_gross_unlock(dev);
++
++ if (retVal == YAFFS_OK) {
++ if (target) {
++ new_dentry->d_inode->i_nlink--;
++ mark_inode_dirty(new_dentry->d_inode);
++ }
++
++ update_dir_time(old_dir);
++ if(old_dir != new_dir)
++ update_dir_time(new_dir);
++ return 0;
++ } else {
++ return -ENOTEMPTY;
++ }
++}
++
++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ yaffs_dev_t *dev;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_setattr of object %d\n"),
++ yaffs_InodeToObject(inode)->obj_id));
++
++ /* Fail if a requested resize >= 2GB */
++ if (attr->ia_valid & ATTR_SIZE &&
++ (attr->ia_size >> 31))
++ error = -EINVAL;
++
++ if (error == 0)
++ error = inode_change_ok(inode, attr);
++ if (error == 0) {
++ int result;
++ if (!error){
++ error = yaffs_vfs_setattr(inode, attr);
++ T(YAFFS_TRACE_OS,(TSTR("inode_setattr called\n")));
++ if (attr->ia_valid & ATTR_SIZE){
++ yaffs_vfs_setsize(inode,attr->ia_size);
++ inode->i_blocks = (inode->i_size + 511) >> 9;
++ }
++ }
++ dev = yaffs_InodeToObject(inode)->my_dev;
++ if (attr->ia_valid & ATTR_SIZE){
++ T(YAFFS_TRACE_OS,(TSTR("resize to %d(%x)\n"),
++ (int)(attr->ia_size),(int)(attr->ia_size)));
++ }
++ yaffs_gross_lock(dev);
++ result = yaffs_set_attribs(yaffs_InodeToObject(inode), attr);
++ if(result == YAFFS_OK) {
++ error = 0;
++ } else {
++ error = -EPERM;
++ }
++ yaffs_gross_unlock(dev);
++
++ }
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_setattr done returning %d\n"),error));
++
++ return error;
++}
++
++#ifdef CONFIG_YAFFS_XATTR
++int yaffs_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ yaffs_dev_t *dev;
++ yaffs_obj_t *obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_setxattr of object %d\n"),
++ obj->obj_id));
++
++
++ if (error == 0) {
++ int result;
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ result = yaffs_set_xattrib(obj, name, value, size, flags);
++ if(result == YAFFS_OK)
++ error = 0;
++ else if(result < 0)
++ error = result;
++ yaffs_gross_unlock(dev);
++
++ }
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_setxattr done returning %d\n"),error));
++
++ return error;
++}
++
++
++ssize_t yaffs_getxattr(struct dentry *dentry, const char *name, void *buff,
++ size_t size)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ yaffs_dev_t *dev;
++ yaffs_obj_t *obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_getxattr \"%s\" from object %d\n"),
++ name, obj->obj_id));
++
++ if (error == 0) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ error = yaffs_get_xattrib(obj, name, buff, size);
++ yaffs_gross_unlock(dev);
++
++ }
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_getxattr done returning %d\n"),error));
++
++ return error;
++}
++
++int yaffs_removexattr(struct dentry *dentry, const char *name)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ yaffs_dev_t *dev;
++ yaffs_obj_t *obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_removexattr of object %d\n"),
++ obj->obj_id));
++
++
++ if (error == 0) {
++ int result;
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ result = yaffs_remove_xattrib(obj, name);
++ if(result == YAFFS_OK)
++ error = 0;
++ else if(result < 0)
++ error = result;
++ yaffs_gross_unlock(dev);
++
++ }
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_removexattr done returning %d\n"),error));
++
++ return error;
++}
++
++ssize_t yaffs_listxattr(struct dentry *dentry, char *buff, size_t size)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ yaffs_dev_t *dev;
++ yaffs_obj_t *obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_listxattr of object %d\n"),
++ obj->obj_id));
++
++
++ if (error == 0) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ error = yaffs_list_xattrib(obj, buff, size);
++ yaffs_gross_unlock(dev);
++
++ }
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_listxattr done returning %d\n"),error));
++
++ return error;
++}
++
++#endif
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++ yaffs_dev_t *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++ struct super_block *sb = dentry->d_sb;
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
++{
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++#else
++static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
++{
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++#endif
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_statfs\n")));
++
++ yaffs_gross_lock(dev);
++
++ buf->f_type = YAFFS_MAGIC;
++ buf->f_bsize = sb->s_blocksize;
++ buf->f_namelen = 255;
++
++ if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
++ /* Do this if chunk size is not a power of 2 */
++
++ uint64_t bytesInDev;
++ uint64_t bytesFree;
++
++ bytesInDev = ((uint64_t)((dev->param.end_block - dev->param.start_block + 1))) *
++ ((uint64_t)(dev->param.chunks_per_block * dev->data_bytes_per_chunk));
++
++ do_div(bytesInDev, sb->s_blocksize); /* bytesInDev becomes the number of blocks */
++ buf->f_blocks = bytesInDev;
++
++ bytesFree = ((uint64_t)(yaffs_get_n_free_chunks(dev))) *
++ ((uint64_t)(dev->data_bytes_per_chunk));
++
++ do_div(bytesFree, sb->s_blocksize);
++
++ buf->f_bfree = bytesFree;
++
++ } else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
++
++ buf->f_blocks =
++ (dev->param.end_block - dev->param.start_block + 1) *
++ dev->param.chunks_per_block /
++ (sb->s_blocksize / dev->data_bytes_per_chunk);
++ buf->f_bfree =
++ yaffs_get_n_free_chunks(dev) /
++ (sb->s_blocksize / dev->data_bytes_per_chunk);
++ } else {
++ buf->f_blocks =
++ (dev->param.end_block - dev->param.start_block + 1) *
++ dev->param.chunks_per_block *
++ (dev->data_bytes_per_chunk / sb->s_blocksize);
++
++ buf->f_bfree =
++ yaffs_get_n_free_chunks(dev) *
++ (dev->data_bytes_per_chunk / sb->s_blocksize);
++ }
++
++ buf->f_files = 0;
++ buf->f_ffree = 0;
++ buf->f_bavail = buf->f_bfree;
++
++ yaffs_gross_unlock(dev);
++ return 0;
++}
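++
++/* Worked example (illustrative, made-up geometry): with 2048-byte chunks,
++ * 64 chunks per block, start_block 0, end_block 1023 and sb->s_blocksize
++ * 4096, the chunk size is a power of two so the do_div() path is skipped;
++ * the blocksize is twice the chunk size, giving
++ * f_blocks = 1024 * 64 / 2 = 32768 and f_bfree = free chunks / 2.
++ */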
++
++
++
++static void yaffs_flush_inodes(struct super_block *sb)
++{
++ struct inode *iptr;
++ yaffs_obj_t *obj;
++
++ list_for_each_entry(iptr,&sb->s_inodes, i_sb_list){
++ obj = yaffs_InodeToObject(iptr);
++ if(obj){
++ T(YAFFS_TRACE_OS, (TSTR("flushing obj %d\n"),
++ obj->obj_id));
++ yaffs_flush_file(obj,1,0);
++ }
++ }
++}
++
++
++static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
++{
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++ if(!dev)
++ return;
++
++ yaffs_flush_inodes(sb);
++ yaffs_update_dirty_dirs(dev);
++ yaffs_flush_whole_cache(dev);
++ if(do_checkpoint)
++ yaffs_checkpoint_save(dev);
++}
++
++
++static unsigned yaffs_bg_gc_urgency(yaffs_dev_t *dev)
++{
++ unsigned erasedChunks = dev->n_erased_blocks * dev->param.chunks_per_block;
++ struct yaffs_LinuxContext *context = yaffs_dev_to_lc(dev);
++ unsigned scatteredFree = 0; /* Free chunks not in an erased block */
++
++ if(erasedChunks < dev->n_free_chunks)
++ scatteredFree = (dev->n_free_chunks - erasedChunks);
++
++ if(!context->bgRunning)
++ return 0;
++ else if(scatteredFree < (dev->param.chunks_per_block * 2))
++ return 0;
++ else if(erasedChunks > dev->n_free_chunks/2)
++ return 0;
++ else if(erasedChunks > dev->n_free_chunks/4)
++ return 1;
++ else
++ return 2;
++}
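++
++/* Worked example (illustrative): with chunks_per_block == 64,
++ * n_free_chunks == 1000 and n_erased_blocks == 3 (background thread
++ * running), erasedChunks is 192 and scatteredFree is 808; 192 is neither
++ * above half (500) nor a quarter (250) of the free chunks, so the function
++ * returns 2 -- the most urgent level, which makes the background thread
++ * schedule garbage collection within about HZ/20 jiffies.
++ */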
++
++static int yaffs_do_sync_fs(struct super_block *sb,
++ int request_checkpoint)
++{
++
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++ unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
++ unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
++ int do_checkpoint;
++
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
++ (TSTR("yaffs_do_sync_fs: gc-urgency %d %s %s%s\n"),
++ gc_urgent,
++ sb->s_dirt ? "dirty" : "clean",
++ request_checkpoint ? "checkpoint requested" : "no checkpoint",
++ oneshot_checkpoint ? " one-shot" : "" ));
++
++ yaffs_gross_lock(dev);
++ do_checkpoint = ((request_checkpoint && !gc_urgent) ||
++ oneshot_checkpoint) &&
++ !dev->is_checkpointed;
++
++ if (sb->s_dirt || do_checkpoint) {
++ yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
++ sb->s_dirt = 0;
++ if(oneshot_checkpoint)
++ yaffs_auto_checkpoint &= ~4;
++ }
++ yaffs_gross_unlock(dev);
++
++ return 0;
++}
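++
++/* Example (illustrative): with yaffs_auto_checkpoint == 2, yaffs_write_super()
++ * below requests a checkpoint; if gc urgency is 0 and the device is not
++ * already checkpointed, do_checkpoint is true and yaffs_flush_super() saves
++ * one. A non-zero gc urgency suppresses the checkpoint unless the one-shot
++ * bit (yaffs_auto_checkpoint & 4) is set, in which case that bit is cleared
++ * after the flush.
++ */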
++
++/*
++ * yaffs background thread functions.
++ * yaffs_bg_thread_fn() is the thread function.
++ * yaffs_bg_start() launches the background thread.
++ * yaffs_bg_stop() cleans up the background thread.
++ *
++ * NB:
++ * The thread should only run after yaffs is initialised.
++ * The thread should be stopped before yaffs is unmounted.
++ * The thread should not do any writing while the fs is mounted read-only.
++ */
++
++#ifdef YAFFS_COMPILE_BACKGROUND
++
++void yaffs_background_waker(unsigned long data)
++{
++ wake_up_process((struct task_struct *)data);
++}
++
++static int yaffs_bg_thread_fn(void *data)
++{
++ yaffs_dev_t *dev = (yaffs_dev_t *)data;
++ struct yaffs_LinuxContext *context = yaffs_dev_to_lc(dev);
++ unsigned long now = jiffies;
++ unsigned long next_dir_update = now;
++ unsigned long next_gc = now;
++ unsigned long expires;
++ unsigned int urgency;
++
++ int gcResult;
++ struct timer_list timer;
++
++ T(YAFFS_TRACE_BACKGROUND,
++ (TSTR("yaffs_background starting for dev %p\n"),
++ (void *)dev));
++
++#ifdef YAFFS_COMPILE_FREEZER
++ set_freezable();
++#endif
++ while(context->bgRunning){
++ T(YAFFS_TRACE_BACKGROUND,
++ (TSTR("yaffs_background\n")));
++
++ if(kthread_should_stop())
++ break;
++
++#ifdef YAFFS_COMPILE_FREEZER
++ if(try_to_freeze())
++ continue;
++#endif
++ yaffs_gross_lock(dev);
++
++ now = jiffies;
++
++ if(time_after(now, next_dir_update) && yaffs_bg_enable){
++ yaffs_update_dirty_dirs(dev);
++ next_dir_update = now + HZ;
++ }
++
++ if(time_after(now,next_gc) && yaffs_bg_enable){
++ if(!dev->is_checkpointed){
++ urgency = yaffs_bg_gc_urgency(dev);
++ gcResult = yaffs_bg_gc(dev, urgency);
++ if(urgency > 1)
++ next_gc = now + HZ/20+1;
++ else if(urgency > 0)
++ next_gc = now + HZ/10+1;
++ else
++ next_gc = now + HZ * 2;
++ } else /*
++ * gc not running so set to next_dir_update
++ * to cut down on wake ups
++ */
++ next_gc = next_dir_update;
++ }
++ yaffs_gross_unlock(dev);
++#if 1
++ expires = next_dir_update;
++ if (time_before(next_gc,expires))
++ expires = next_gc;
++ if(time_before(expires,now))
++ expires = now + HZ;
++
++ Y_INIT_TIMER(&timer);
++ timer.expires = expires+1;
++ timer.data = (unsigned long) current;
++ timer.function = yaffs_background_waker;
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ add_timer(&timer);
++ schedule();
++ del_timer_sync(&timer);
++#else
++ msleep(10);
++#endif
++ }
++
++ return 0;
++}
++
++static int yaffs_bg_start(yaffs_dev_t *dev)
++{
++ int retval = 0;
++ struct yaffs_LinuxContext *context = yaffs_dev_to_lc(dev);
++
++ if(dev->read_only)
++ return -1;
++
++ context->bgRunning = 1;
++
++ context->bgThread = kthread_run(yaffs_bg_thread_fn,
++ (void *)dev,"yaffs-bg-%d",context->mount_id);
++
++ if(IS_ERR(context->bgThread)){
++ retval = PTR_ERR(context->bgThread);
++ context->bgThread = NULL;
++ context->bgRunning = 0;
++ }
++ return retval;
++}
++
++static void yaffs_bg_stop(yaffs_dev_t *dev)
++{
++ struct yaffs_LinuxContext *ctxt = yaffs_dev_to_lc(dev);
++
++ ctxt->bgRunning = 0;
++
++ if( ctxt->bgThread){
++ kthread_stop(ctxt->bgThread);
++ ctxt->bgThread = NULL;
++ }
++}
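++
++/* Note: yaffs_bg_stop() is called from yaffs_put_super() below, matching the
++ * rules in the comment above; yaffs_bg_start() is presumably invoked from the
++ * mount path later in this file once the device is initialised. When
++ * YAFFS_COMPILE_BACKGROUND is not set, the stubs in the #else branch keep
++ * those callers unchanged.
++ */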
++#else
++static int yaffs_bg_thread_fn(void *data)
++{
++ return 0;
++}
++
++static int yaffs_bg_start(yaffs_dev_t *dev)
++{
++ return 0;
++}
++
++static void yaffs_bg_stop(yaffs_dev_t *dev)
++{
++}
++#endif
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static void yaffs_write_super(struct super_block *sb)
++#else
++static int yaffs_write_super(struct super_block *sb)
++#endif
++{
++ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
++
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
++ (TSTR("yaffs_write_super%s\n"),
++ request_checkpoint ? " checkpt" : ""));
++
++ yaffs_do_sync_fs(sb, request_checkpoint);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
++ return 0;
++#endif
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_sync_fs(struct super_block *sb, int wait)
++#else
++static int yaffs_sync_fs(struct super_block *sb)
++#endif
++{
++ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
++
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
++ (TSTR("yaffs_sync_fs%s\n"),
++ request_checkpoint ? " checkpt" : ""));
++
++ yaffs_do_sync_fs(sb, request_checkpoint);
++
++ return 0;
++}
++
++#ifdef YAFFS_USE_OWN_IGET
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
++{
++ struct inode *inode;
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_iget for %lu\n"), ino));
++
++ inode = iget_locked(sb, ino);
++ if (!inode)
++ return ERR_PTR(-ENOMEM);
++ if (!(inode->i_state & I_NEW))
++ return inode;
++
++ /* NB This is called as a side effect of other functions, but
++ * we had to release the lock to prevent deadlocks, so
++ * need to lock again.
++ */
++
++ yaffs_gross_lock(dev);
++
++ obj = yaffs_find_by_number(dev, inode->i_ino);
++
++ yaffs_fill_inode_from_obj(inode, obj);
++
++ yaffs_gross_unlock(dev);
++
++ unlock_new_inode(inode);
++ return inode;
++}
++
++#else
++
++static void yaffs_read_inode(struct inode *inode)
++{
++ /* NB This is called as a side effect of other functions, but
++ * we had to release the lock to prevent deadlocks, so
++ * need to lock again.
++ */
++
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev = yaffs_SuperToDevice(inode->i_sb);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_read_inode for %d\n"), (int)inode->i_ino));
++
++ if(current != yaffs_dev_to_lc(dev)->readdirProcess)
++ yaffs_gross_lock(dev);
++
++ obj = yaffs_find_by_number(dev, inode->i_ino);
++
++ yaffs_fill_inode_from_obj(inode, obj);
++
++ if(current != yaffs_dev_to_lc(dev)->readdirProcess)
++ yaffs_gross_unlock(dev);
++}
++
++#endif
++
++static YLIST_HEAD(yaffs_context_list);
++struct semaphore yaffs_context_lock;
++
++static void yaffs_put_super(struct super_block *sb)
++{
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_put_super\n")));
++
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
++ (TSTR("Shutting down yaffs background thread\n")));
++ yaffs_bg_stop(dev);
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
++ (TSTR("yaffs background thread shut down\n")));
++
++ yaffs_gross_lock(dev);
++
++ yaffs_flush_super(sb,1);
++
++ if (yaffs_dev_to_lc(dev)->putSuperFunc)
++ yaffs_dev_to_lc(dev)->putSuperFunc(sb);
++
++
++ yaffs_deinitialise(dev);
++
++ yaffs_gross_unlock(dev);
++
++ down(&yaffs_context_lock);
++ ylist_del_init(&(yaffs_dev_to_lc(dev)->contextList));
++ up(&yaffs_context_lock);
++
++ if (yaffs_dev_to_lc(dev)->spareBuffer) {
++ YFREE(yaffs_dev_to_lc(dev)->spareBuffer);
++ yaffs_dev_to_lc(dev)->spareBuffer = NULL;
++ }
++
++ kfree(dev);
++}
++
++
++static void yaffs_MTDPutSuper(struct super_block *sb)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(yaffs_SuperToDevice(sb));
++
++ if (mtd->sync)
++ mtd->sync(mtd);
++
++ put_mtd_device(mtd);
++}
++
++
++static void yaffs_touch_super(yaffs_dev_t *dev)
++{
++ struct super_block *sb = yaffs_dev_to_lc(dev)->superBlock;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_touch_super() sb = %p\n"), sb));
++ if (sb)
++ sb->s_dirt = 1;
++}
++
++typedef struct {
++ int inband_tags;
++ int skip_checkpoint_read;
++ int skip_checkpoint_write;
++ int no_cache;
++ int tags_ecc_on;
++ int tags_ecc_overridden;
++ int lazy_loading_enabled;
++ int lazy_loading_overridden;
++ int empty_lost_and_found;
++ int empty_lost_and_found_overridden;
++} yaffs_options;
++
++#define MAX_OPT_LEN 30
++static int yaffs_parse_options(yaffs_options *options, const char *options_str)
++{
++ char cur_opt[MAX_OPT_LEN + 1];
++ int p;
++ int error = 0;
++
++ /* Parse the options, which form a comma-separated list */
++
++ while (options_str && *options_str && !error) {
++ memset(cur_opt, 0, MAX_OPT_LEN + 1);
++ p = 0;
++
++ while(*options_str == ',')
++ options_str++;
++
++ while (*options_str && *options_str != ',') {
++ if (p < MAX_OPT_LEN) {
++ cur_opt[p] = *options_str;
++ p++;
++ }
++ options_str++;
++ }
++
++ if (!strcmp(cur_opt, "inband-tags"))
++ options->inband_tags = 1;
++ else if (!strcmp(cur_opt, "tags-ecc-off")){
++ options->tags_ecc_on = 0;
++ options->tags_ecc_overridden=1;
++ } else if (!strcmp(cur_opt, "tags-ecc-on")){
++ options->tags_ecc_on = 1;
++ options->tags_ecc_overridden = 1;
++ } else if (!strcmp(cur_opt, "lazy-loading-off")){
++ options->lazy_loading_enabled = 0;
++ options->lazy_loading_overridden=1;
++ } else if (!strcmp(cur_opt, "lazy-loading-on")){
++ options->lazy_loading_enabled = 1;
++ options->lazy_loading_overridden = 1;
++ } else if (!strcmp(cur_opt, "empty-lost-and-found-off")){
++ options->empty_lost_and_found = 0;
++ options->empty_lost_and_found_overridden=1;
++ } else if (!strcmp(cur_opt, "empty-lost-and-found-on")){
++ options->empty_lost_and_found = 1;
++ options->empty_lost_and_found_overridden=1;
++ } else if (!strcmp(cur_opt, "no-cache"))
++ options->no_cache = 1;
++ else if (!strcmp(cur_opt, "no-checkpoint-read"))
++ options->skip_checkpoint_read = 1;
++ else if (!strcmp(cur_opt, "no-checkpoint-write"))
++ options->skip_checkpoint_write = 1;
++ else if (!strcmp(cur_opt, "no-checkpoint")) {
++ options->skip_checkpoint_read = 1;
++ options->skip_checkpoint_write = 1;
++ } else {
++ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
++ cur_opt);
++ error = 1;
++ }
++ }
++
++ return error;
++}
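
yaffs_parse_options() above walks the mount data string by hand as a comma-separated token list and sets the corresponding flags in the yaffs_options struct. The stand-alone sketch below mirrors that tokenising loop in user space so the behaviour is easy to try out; the demo_options struct, the reduced option subset, and main() are assumptions made purely for illustration and are not part of this patch.

/* Illustrative user-space sketch of the comma-separated option walk above.
 * The option names mirror the kernel code; everything else here is assumed
 * for the example only.
 */
#include <stdio.h>
#include <string.h>

#define MAX_OPT_LEN 30

struct demo_options {
    int inband_tags;
    int skip_checkpoint_read;
    int skip_checkpoint_write;
};

static int demo_parse_options(struct demo_options *options, const char *options_str)
{
    char cur_opt[MAX_OPT_LEN + 1];
    int p;
    int error = 0;

    while (options_str && *options_str && !error) {
        memset(cur_opt, 0, sizeof(cur_opt));
        p = 0;

        while (*options_str == ',')        /* skip leading/consecutive commas */
            options_str++;

        while (*options_str && *options_str != ',') {
            if (p < MAX_OPT_LEN)
                cur_opt[p++] = *options_str;
            options_str++;
        }

        if (!strcmp(cur_opt, "inband-tags"))
            options->inband_tags = 1;
        else if (!strcmp(cur_opt, "no-checkpoint")) {
            options->skip_checkpoint_read = 1;
            options->skip_checkpoint_write = 1;
        } else if (cur_opt[0]) {
            printf("bad mount option \"%s\"\n", cur_opt);
            error = 1;
        }
    }
    return error;
}

int main(void)
{
    struct demo_options o = {0};

    if (!demo_parse_options(&o, "inband-tags,no-checkpoint"))
        printf("inband_tags=%d skip_rd=%d skip_wr=%d\n",
               o.inband_tags, o.skip_checkpoint_read,
               o.skip_checkpoint_write);
    return 0;
}
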
++
++static struct super_block *yaffs_internal_read_super(int yaffs_version,
++ struct super_block *sb,
++ void *data, int silent)
++{
++ int nBlocks;
++ struct inode *inode = NULL;
++ struct dentry *root;
++ yaffs_dev_t *dev = 0;
++ char devname_buf[BDEVNAME_SIZE + 1];
++ struct mtd_info *mtd;
++ int err;
++ char *data_str = (char *)data;
++ struct yaffs_LinuxContext *context = NULL;
++ yaffs_param_t *param;
++
++ int read_only = 0;
++
++ yaffs_options options;
++
++ unsigned mount_id;
++ int found;
++ struct yaffs_LinuxContext *context_iterator;
++ struct ylist_head *l;
++
++ sb->s_magic = YAFFS_MAGIC;
++ sb->s_op = &yaffs_super_ops;
++ sb->s_flags |= MS_NOATIME;
++
++ read_only =((sb->s_flags & MS_RDONLY) != 0);
++
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++ sb->s_export_op = &yaffs_export_ops;
++#endif
++
++ if (!sb)
++ printk(KERN_INFO "yaffs: sb is NULL\n");
++ else if (!sb->s_dev)
++ printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
++ else if (!yaffs_devname(sb, devname_buf))
++ printk(KERN_INFO "yaffs: devname is NULL\n");
++ else
++ printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
++ sb->s_dev,
++ yaffs_devname(sb, devname_buf),
++ read_only ? "ro" : "rw");
++
++ if (!data_str)
++ data_str = "";
++
++ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
++
++ memset(&options, 0, sizeof(options));
++
++ if (yaffs_parse_options(&options, data_str)) {
++ /* Option parsing failed */
++ return NULL;
++ }
++
++
++ sb->s_blocksize = PAGE_CACHE_SIZE;
++ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_read_super: Using yaffs%d\n"), yaffs_version));
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_read_super: block size %d\n"),
++ (int)(sb->s_blocksize)));
++
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: Attempting MTD mount of %u.%u,\"%s\"\n"),
++ MAJOR(sb->s_dev), MINOR(sb->s_dev),
++ yaffs_devname(sb, devname_buf)));
++
++ /* Check it's an mtd device..... */
++ if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
++ return NULL; /* This isn't an mtd device */
++
++ /* Get the device */
++ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
++ if (!mtd) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device #%u doesn't appear to exist\n"),
++ MINOR(sb->s_dev)));
++ return NULL;
++ }
++ /* Check it's NAND */
++ if (mtd->type != MTD_NANDFLASH) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device is not NAND it's type %d\n"),
++ mtd->type));
++ return NULL;
++ }
++
++ T(YAFFS_TRACE_OS, (TSTR(" erase %p\n"), mtd->erase));
++ T(YAFFS_TRACE_OS, (TSTR(" read %p\n"), mtd->read));
++ T(YAFFS_TRACE_OS, (TSTR(" write %p\n"), mtd->write));
++ T(YAFFS_TRACE_OS, (TSTR(" readoob %p\n"), mtd->read_oob));
++ T(YAFFS_TRACE_OS, (TSTR(" writeoob %p\n"), mtd->write_oob));
++ T(YAFFS_TRACE_OS, (TSTR(" block_isbad %p\n"), mtd->block_isbad));
++ T(YAFFS_TRACE_OS, (TSTR(" block_markbad %p\n"), mtd->block_markbad));
++ T(YAFFS_TRACE_OS, (TSTR(" %s %d\n"), WRITE_SIZE_STR, WRITE_SIZE(mtd)));
++ T(YAFFS_TRACE_OS, (TSTR(" oobsize %d\n"), mtd->oobsize));
++ T(YAFFS_TRACE_OS, (TSTR(" erasesize %d\n"), mtd->erasesize));
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++ T(YAFFS_TRACE_OS, (TSTR(" size %u\n"), mtd->size));
++#else
++ T(YAFFS_TRACE_OS, (TSTR(" size %lld\n"), mtd->size));
++#endif
++
++#ifdef CONFIG_YAFFS_AUTO_YAFFS2
++
++ if (yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: auto selecting yaffs2\n")));
++ yaffs_version = 2;
++ }
++
++ /* Added NCB 26/5/2006 for completeness */
++ if (yaffs_version == 2 && !options.inband_tags && WRITE_SIZE(mtd) == 512) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: auto selecting yaffs1\n")));
++ yaffs_version = 1;
++ }
++
++#endif
++
++ if (yaffs_version == 2) {
++ /* Check for version 2 style functions */
++ if (!mtd->erase ||
++ !mtd->block_isbad ||
++ !mtd->block_markbad ||
++ !mtd->read ||
++ !mtd->write ||
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ !mtd->read_oob || !mtd->write_oob) {
++#else
++ !mtd->write_ecc ||
++ !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
++#endif
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device does not support required "
++ "functions\n")));
++ return NULL;
++ }
++
++ if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
++ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
++ !options.inband_tags) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device does not have the "
++ "right page sizes\n")));
++ return NULL;
++ }
++ } else {
++ /* Check for V1 style functions */
++ if (!mtd->erase ||
++ !mtd->read ||
++ !mtd->write ||
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ !mtd->read_oob || !mtd->write_oob) {
++#else
++ !mtd->write_ecc ||
++ !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
++#endif
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device does not support required "
++ "functions\n")));
++ return NULL;
++ }
++
++ if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
++ mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device does not support have the "
++ "right page sizes\n")));
++ return NULL;
++ }
++ }
++
++ /* OK, so if we got here, we have an MTD that's NAND and looks
++ * like it has the right capabilities.
++ * Set the yaffs_dev_t up for MTD.
++ */
++
++ if (!read_only && !(mtd->flags & MTD_WRITEABLE)){
++ read_only = 1;
++ printk(KERN_INFO "yaffs: mtd is read only, setting superblock read only");
++ sb->s_flags |= MS_RDONLY;
++ }
++
++ dev = kmalloc(sizeof(yaffs_dev_t), GFP_KERNEL);
++ context = kmalloc(sizeof(struct yaffs_LinuxContext),GFP_KERNEL);
++
++ if(!dev || !context ){
++ if(dev)
++ kfree(dev);
++ if(context)
++ kfree(context);
++ dev = NULL;
++ context = NULL;
++ }
++
++ if (!dev) {
++ /* Fatal: could not allocate the device structure */
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs_read_super: Failed trying to allocate "
++ "yaffs_dev_t. \n")));
++ return NULL;
++ }
++ memset(dev, 0, sizeof(yaffs_dev_t));
++ param = &(dev->param);
++
++ memset(context,0,sizeof(struct yaffs_LinuxContext));
++ dev->os_context = context;
++ YINIT_LIST_HEAD(&(context->contextList));
++ context->dev = dev;
++ context->superBlock = sb;
++
++ dev->read_only = read_only;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ sb->s_fs_info = dev;
++#else
++ sb->u.generic_sbp = dev;
++#endif
++
++ dev->driver_context = mtd;
++ param->name = mtd->name;
++
++ /* Set up the memory size parameters.... */
++
++ nBlocks = YCALCBLOCKS(mtd->size, (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK));
++
++ param->start_block = 0;
++ param->end_block = nBlocks - 1;
++ param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
++ param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
++ param->n_reserved_blocks = 5;
++ param->n_caches = (options.no_cache) ? 0 : 10;
++ param->inband_tags = options.inband_tags;
++
++#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD
++ param->disable_lazy_load = 1;
++#endif
++#ifdef CONFIG_YAFFS_XATTR
++ param->enable_xattr = 1;
++#endif
++ if(options.lazy_loading_overridden)
++ param->disable_lazy_load = !options.lazy_loading_enabled;
++
++#ifdef CONFIG_YAFFS_DISABLE_TAGS_ECC
++ param->no_tags_ecc = 1;
++#endif
++
++#ifdef CONFIG_YAFFS_DISABLE_BACKGROUND
++#else
++ param->defered_dir_update = 1;
++#endif
++
++ if(options.tags_ecc_overridden)
++ param->no_tags_ecc = !options.tags_ecc_on;
++
++#ifdef CONFIG_YAFFS_EMPTY_LOST_AND_FOUND
++ param->empty_lost_n_found = 1;
++#endif
++
++#ifdef CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING
++ param->refresh_period = 0;
++#else
++ param->refresh_period = 500;
++#endif
++
++#ifdef CONFIG_YAFFS__ALWAYS_CHECK_CHUNK_ERASED
++ param->always_check_erased = 1;
++#endif
++
++ if(options.empty_lost_and_found_overridden)
++ param->empty_lost_n_found = options.empty_lost_and_found;
++
++ /* ... and the functions. */
++ if (yaffs_version == 2) {
++ param->write_chunk_tags_fn =
++ nandmtd2_WriteChunkWithTagsToNAND;
++ param->read_chunk_tags_fn =
++ nandmtd2_ReadChunkWithTagsFromNAND;
++ param->bad_block_fn = nandmtd2_MarkNANDBlockBad;
++ param->query_block_fn = nandmtd2_QueryNANDBlock;
++ yaffs_dev_to_lc(dev)->spareBuffer = YMALLOC(mtd->oobsize);
++ param->is_yaffs2 = 1;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ param->total_bytes_per_chunk = mtd->writesize;
++ param->chunks_per_block = mtd->erasesize / mtd->writesize;
++#else
++ param->total_bytes_per_chunk = mtd->oobblock;
++ param->chunks_per_block = mtd->erasesize / mtd->oobblock;
++#endif
++ nBlocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
++
++ param->start_block = 0;
++ param->end_block = nBlocks - 1;
++ } else {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ /* use the MTD interface in yaffs_mtdif1.c */
++ param->write_chunk_tags_fn =
++ nandmtd1_WriteChunkWithTagsToNAND;
++ param->read_chunk_tags_fn =
++ nandmtd1_ReadChunkWithTagsFromNAND;
++ param->bad_block_fn = nandmtd1_MarkNANDBlockBad;
++ param->query_block_fn = nandmtd1_QueryNANDBlock;
++#else
++ param->write_chunk_fn = nandmtd_WriteChunkToNAND;
++ param->read_chunk_fn = nandmtd_ReadChunkFromNAND;
++#endif
++ param->is_yaffs2 = 0;
++ }
++ /* ... and common functions */
++ param->erase_fn = nandmtd_EraseBlockInNAND;
++ param->initialise_flash_fn = nandmtd_InitialiseNAND;
++
++ yaffs_dev_to_lc(dev)->putSuperFunc = yaffs_MTDPutSuper;
++
++ param->sb_dirty_fn = yaffs_touch_super;
++ param->gc_control = yaffs_gc_control_callback;
++
++ yaffs_dev_to_lc(dev)->superBlock= sb;
++
++
++#ifndef CONFIG_YAFFS_DOES_ECC
++ param->use_nand_ecc = 1;
++#endif
++
++#ifdef CONFIG_YAFFS_DISABLE_WIDE_TNODES
++ param->wide_tnodes_disabled = 1;
++#endif
++
++ param->skip_checkpt_rd = options.skip_checkpoint_read;
++ param->skip_checkpt_wr = options.skip_checkpoint_write;
++
++ down(&yaffs_context_lock);
++ /* Get a mount id */
++ found = 0;
++ for(mount_id=0; ! found; mount_id++){
++ found = 1;
++ ylist_for_each(l,&yaffs_context_list){
++ context_iterator = ylist_entry(l,struct yaffs_LinuxContext,contextList);
++ if(context_iterator->mount_id == mount_id)
++ found = 0;
++ }
++ }
++ context->mount_id = mount_id;
++
++ ylist_add_tail(&(yaffs_dev_to_lc(dev)->contextList), &yaffs_context_list);
++ up(&yaffs_context_lock);
++
++ /* Directory search handling...*/
++ YINIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->searchContexts));
++ param->remove_obj_fn = yaffs_remove_obj_callback;
++
++ init_MUTEX(&(yaffs_dev_to_lc(dev)->grossLock));
++
++ yaffs_gross_lock(dev);
++
++ err = yaffs_guts_initialise(dev);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_read_super: guts initialised %s\n"),
++ (err == YAFFS_OK) ? "OK" : "FAILED"));
++
++ if(err == YAFFS_OK)
++ yaffs_bg_start(dev);
++
++ if(!context->bgThread)
++ param->defered_dir_update = 0;
++
++
++ /* Release lock before yaffs_get_inode() */
++ yaffs_gross_unlock(dev);
++
++ /* Create root inode */
++ if (err == YAFFS_OK)
++ inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0,
++ yaffs_root(dev));
++
++ if (!inode)
++ return NULL;
++
++ inode->i_op = &yaffs_dir_inode_operations;
++ inode->i_fop = &yaffs_dir_operations;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: got root inode\n")));
++
++ root = d_alloc_root(inode);
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: d_alloc_root done\n")));
++
++ if (!root) {
++ iput(inode);
++ return NULL;
++ }
++ sb->s_root = root;
++ sb->s_dirt = !dev->is_checkpointed;
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs_read_super: is_checkpointed %d\n"),
++ dev->is_checkpointed));
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: done\n")));
++ return sb;
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data, struct vfsmount *mnt)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs_fs_type = {
++ .owner = THIS_MODULE,
++ .name = "yaffs",
++ .get_sb = yaffs_read_super,
++ .kill_sb = kill_block_super,
++ .fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(1, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
++ FS_REQUIRES_DEV);
++#endif
++
++
++#ifdef CONFIG_YAFFS_YAFFS2
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs2_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name, void *data,
++ struct vfsmount *mnt)
++{
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs2_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs2_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs2_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs2_fs_type = {
++ .owner = THIS_MODULE,
++ .name = "yaffs2",
++ .get_sb = yaffs2_read_super,
++ .kill_sb = kill_block_super,
++ .fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs2_read_super(struct super_block *sb,
++ void *data, int silent)
++{
++ return yaffs_internal_read_super(2, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
++ FS_REQUIRES_DEV);
++#endif
++
++#endif /* CONFIG_YAFFS_YAFFS2 */
++
++static struct proc_dir_entry *my_proc_entry;
++static struct proc_dir_entry *debug_proc_entry;
++
++static char *yaffs_dump_dev_part0(char *buf, yaffs_dev_t * dev)
++{
++ buf += sprintf(buf, "start_block.......... %d\n", dev->param.start_block);
++ buf += sprintf(buf, "end_block............ %d\n", dev->param.end_block);
++ buf += sprintf(buf, "total_bytes_per_chunk %d\n", dev->param.total_bytes_per_chunk);
++ buf += sprintf(buf, "use_nand_ecc......... %d\n", dev->param.use_nand_ecc);
++ buf += sprintf(buf, "no_tags_ecc.......... %d\n", dev->param.no_tags_ecc);
++ buf += sprintf(buf, "is_yaffs2............ %d\n", dev->param.is_yaffs2);
++ buf += sprintf(buf, "inband_tags.......... %d\n", dev->param.inband_tags);
++ buf += sprintf(buf, "empty_lost_n_found... %d\n", dev->param.empty_lost_n_found);
++ buf += sprintf(buf, "disable_lazy_load.... %d\n", dev->param.disable_lazy_load);
++ buf += sprintf(buf, "refresh_period....... %d\n", dev->param.refresh_period);
++ buf += sprintf(buf, "n_caches............. %d\n", dev->param.n_caches);
++ buf += sprintf(buf, "n_reserved_blocks.... %d\n", dev->param.n_reserved_blocks);
++ buf += sprintf(buf, "always_check_erased.. %d\n", dev->param.always_check_erased);
++
++ buf += sprintf(buf, "\n");
++
++ return buf;
++}
++
++
++static char *yaffs_dump_dev_part1(char *buf, yaffs_dev_t * dev)
++{
++ buf += sprintf(buf, "data_bytes_per_chunk. %d\n", dev->data_bytes_per_chunk);
++ buf += sprintf(buf, "chunk_grp_bits....... %d\n", dev->chunk_grp_bits);
++ buf += sprintf(buf, "chunk_grp_size....... %d\n", dev->chunk_grp_size);
++ buf += sprintf(buf, "n_erased_blocks...... %d\n", dev->n_erased_blocks);
++ buf += sprintf(buf, "blocks_in_checkpt.... %d\n", dev->blocks_in_checkpt);
++ buf += sprintf(buf, "\n");
++ buf += sprintf(buf, "n_tnodes............. %d\n", dev->n_tnodes);
++ buf += sprintf(buf, "n_obj................ %d\n", dev->n_obj);
++ buf += sprintf(buf, "n_free_chunks........ %d\n", dev->n_free_chunks);
++ buf += sprintf(buf, "\n");
++ buf += sprintf(buf, "n_page_writes........ %u\n", dev->n_page_writes);
++ buf += sprintf(buf, "n_page_reads......... %u\n", dev->n_page_reads);
++ buf += sprintf(buf, "n_erasures........... %u\n", dev->n_erasures);
++ buf += sprintf(buf, "n_gc_copies.......... %u\n", dev->n_gc_copies);
++ buf += sprintf(buf, "all_gcs.............. %u\n", dev->all_gcs);
++ buf += sprintf(buf, "passive_gc_count..... %u\n", dev->passive_gc_count);
++ buf += sprintf(buf, "oldest_dirty_gc_count %u\n", dev->oldest_dirty_gc_count);
++ buf += sprintf(buf, "n_gc_blocks.......... %u\n", dev->n_gc_blocks);
++ buf += sprintf(buf, "bg_gcs............... %u\n", dev->bg_gcs);
++ buf += sprintf(buf, "n_retired_writes..... %u\n", dev->n_retired_writes);
++ buf += sprintf(buf, "nRetireBlocks........ %u\n", dev->n_retired_blocks);
++ buf += sprintf(buf, "n_ecc_fixed.......... %u\n", dev->n_ecc_fixed);
++ buf += sprintf(buf, "n_ecc_unfixed........ %u\n", dev->n_ecc_unfixed);
++ buf += sprintf(buf, "n_tags_ecc_fixed..... %u\n", dev->n_tags_ecc_fixed);
++ buf += sprintf(buf, "n_tags_ecc_unfixed... %u\n", dev->n_tags_ecc_unfixed);
++ buf += sprintf(buf, "cache_hits........... %u\n", dev->cache_hits);
++ buf += sprintf(buf, "n_deleted_files...... %u\n", dev->n_deleted_files);
++ buf += sprintf(buf, "n_unlinked_files..... %u\n", dev->n_unlinked_files);
++ buf += sprintf(buf, "refresh_count........ %u\n", dev->refresh_count);
++ buf += sprintf(buf, "n_bg_deletions....... %u\n", dev->n_bg_deletions);
++
++ return buf;
++}
++
++static int yaffs_proc_read(char *page,
++ char **start,
++ off_t offset, int count, int *eof, void *data)
++{
++ struct ylist_head *item;
++ char *buf = page;
++ int step = offset;
++ int n = 0;
++
++ /* Get proc_file_read() to step 'offset' by one on each successive call.
++ * We use 'offset' (*ppos) to indicate where we are in dev_list.
++ * This also assumes the user has posted a read buffer large
++ * enough to hold the complete output; but that's life in /proc.
++ */
++
++ *(int *)start = 1;
++
++ /* Print header first */
++ if (step == 0)
++ buf += sprintf(buf, "Multi-version YAFFS built:" __DATE__ " " __TIME__"\n");
++ else if (step == 1)
++ buf += sprintf(buf,"\n");
++ else {
++ step-=2;
++
++ down(&yaffs_context_lock);
++
++ /* Locate and print the Nth entry. Order N-squared but N is small. */
++ ylist_for_each(item, &yaffs_context_list) {
++ struct yaffs_LinuxContext *dc = ylist_entry(item, struct yaffs_LinuxContext, contextList);
++ yaffs_dev_t *dev = dc->dev;
++
++ if (n < (step & ~1)) {
++ n+=2;
++ continue;
++ }
++ if((step & 1)==0){
++ buf += sprintf(buf, "\nDevice %d \"%s\"\n", n, dev->param.name);
++ buf = yaffs_dump_dev_part0(buf, dev);
++ } else
++ buf = yaffs_dump_dev_part1(buf, dev);
++
++ break;
++ }
++ up(&yaffs_context_lock);
++ }
++
++ return buf - page < count ? buf - page : count;
++}
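
The stepping comment above hides a small mapping: proc reads advance 'offset' by one per call, offsets 0 and 1 print the header lines, and from then on each mounted device gets two consecutive steps (part0 output, then part1 output). The tiny user-space sketch below just prints that mapping; it is illustrative only and not part of the patch.

/* Sketch of the offset-to-output mapping used by yaffs_proc_read() above.
 * Purely illustrative; the real code walks yaffs_context_list instead of
 * computing an index.
 */
#include <stdio.h>

int main(void)
{
    int offset;

    for (offset = 0; offset < 8; offset++) {
        if (offset == 0)
            printf("step %d: header line\n", offset);
        else if (offset == 1)
            printf("step %d: blank line\n", offset);
        else
            printf("step %d: device %d part%d\n",
                   offset, (offset - 2) / 2, (offset - 2) & 1);
    }
    return 0;
}
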
++
++static int yaffs_stats_proc_read(char *page,
++ char **start,
++ off_t offset, int count, int *eof, void *data)
++{
++ struct ylist_head *item;
++ char *buf = page;
++ int n = 0;
++
++ down(&yaffs_context_lock);
++
++ /* Locate and print the Nth entry. Order N-squared but N is small. */
++ ylist_for_each(item, &yaffs_context_list) {
++ struct yaffs_LinuxContext *dc = ylist_entry(item, struct yaffs_LinuxContext, contextList);
++ yaffs_dev_t *dev = dc->dev;
++
++ int erasedChunks;
++
++ erasedChunks = dev->n_erased_blocks * dev->param.chunks_per_block;
++
++ buf += sprintf(buf,"%d, %d, %d, %u, %u, %u, %u\n",
++ n, dev->n_free_chunks, erasedChunks,
++ dev->bg_gcs, dev->oldest_dirty_gc_count,
++ dev->n_obj, dev->n_tnodes);
++ }
++ up(&yaffs_context_lock);
++
++
++ return buf - page < count ? buf - page : count;
++}
++
++/**
++ * Set the verbosity of the warnings and error messages.
++ *
++ * Note that the names can only be a..z or _ with the current code.
++ */
++
++static struct {
++ char *mask_name;
++ unsigned mask_bitfield;
++} mask_flags[] = {
++ {"allocate", YAFFS_TRACE_ALLOCATE},
++ {"always", YAFFS_TRACE_ALWAYS},
++ {"background", YAFFS_TRACE_BACKGROUND},
++ {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
++ {"buffers", YAFFS_TRACE_BUFFERS},
++ {"bug", YAFFS_TRACE_BUG},
++ {"checkpt", YAFFS_TRACE_CHECKPOINT},
++ {"deletion", YAFFS_TRACE_DELETION},
++ {"erase", YAFFS_TRACE_ERASE},
++ {"error", YAFFS_TRACE_ERROR},
++ {"gc_detail", YAFFS_TRACE_GC_DETAIL},
++ {"gc", YAFFS_TRACE_GC},
++ {"lock", YAFFS_TRACE_LOCK},
++ {"mtd", YAFFS_TRACE_MTD},
++ {"nandaccess", YAFFS_TRACE_NANDACCESS},
++ {"os", YAFFS_TRACE_OS},
++ {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
++ {"scan", YAFFS_TRACE_SCAN},
++ {"tracing", YAFFS_TRACE_TRACING},
++ {"sync", YAFFS_TRACE_SYNC},
++ {"write", YAFFS_TRACE_WRITE},
++
++ {"verify", YAFFS_TRACE_VERIFY},
++ {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
++ {"verify_full", YAFFS_TRACE_VERIFY_FULL},
++ {"verify_all", YAFFS_TRACE_VERIFY_ALL},
++
++ {"all", 0xffffffff},
++ {"none", 0},
++ {NULL, 0},
++};
++
++#define MAX_MASK_NAME_LENGTH 40
++static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
++ unsigned long count, void *data)
++{
++ unsigned rg = 0, mask_bitfield;
++ char *end;
++ char *mask_name;
++ const char *x;
++ char substring[MAX_MASK_NAME_LENGTH + 1];
++ int i;
++ int done = 0;
++ int add, len = 0;
++ int pos = 0;
++
++ rg = yaffs_trace_mask;
++
++ while (!done && (pos < count)) {
++ done = 1;
++ while ((pos < count) && isspace(buf[pos]))
++ pos++;
++
++ switch (buf[pos]) {
++ case '+':
++ case '-':
++ case '=':
++ add = buf[pos];
++ pos++;
++ break;
++
++ default:
++ add = ' ';
++ break;
++ }
++ mask_name = NULL;
++
++ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
++
++ if (end > buf + pos) {
++ mask_name = "numeral";
++ len = end - (buf + pos);
++ pos += len;
++ done = 0;
++ } else {
++ for (x = buf + pos, i = 0;
++ (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
++ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
++ substring[i] = *x;
++ substring[i] = '\0';
++
++ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++ if (strcmp(substring, mask_flags[i].mask_name) == 0) {
++ mask_name = mask_flags[i].mask_name;
++ mask_bitfield = mask_flags[i].mask_bitfield;
++ done = 0;
++ break;
++ }
++ }
++ }
++
++ if (mask_name != NULL) {
++ done = 0;
++ switch (add) {
++ case '-':
++ rg &= ~mask_bitfield;
++ break;
++ case '+':
++ rg |= mask_bitfield;
++ break;
++ case '=':
++ rg = mask_bitfield;
++ break;
++ default:
++ rg |= mask_bitfield;
++ break;
++ }
++ }
++ }
++
++ yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
++
++ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
++
++ if (rg & YAFFS_TRACE_ALWAYS) {
++ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++ char flag;
++ flag = ((rg & mask_flags[i].mask_bitfield) ==
++ mask_flags[i].mask_bitfield) ? '+' : '-';
++ printk(KERN_DEBUG "%c%s\n", flag, mask_flags[i].mask_name);
++ }
++ }
++
++ return count;
++}
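
The parser above sits behind writes to the "yaffs" proc entry created in init_yaffs_fs() further down: a write such as "+gc -os" or "=all" adjusts yaffs_trace_mask against the mask_flags table (a bare number is also accepted via simple_strtoul). The user-space sketch below applies the same +/-/= grammar to a tiny table so the effect of such a string is easy to see; the bit values and helper names are assumptions for the example, not the real YAFFS_TRACE_* constants, and only the symbolic names are handled here.

/* User-space sketch of the +/-/= trace-mask grammar handled above.
 * The bit values below are made up for the example; the real YAFFS_TRACE_*
 * values live in yaffs_trace.h.
 */
#include <stdio.h>
#include <string.h>
#include <ctype.h>

static const struct {
    const char *name;
    unsigned bit;
} demo_flags[] = {
    { "os",  0x00000001 },    /* assumed value */
    { "gc",  0x00000002 },    /* assumed value */
    { "mtd", 0x00000004 },    /* assumed value */
    { "all", 0xffffffff },
    { NULL, 0 },
};

static unsigned apply_trace_string(unsigned mask, const char *s)
{
    while (*s) {
        char op = ' ';
        char name[32];
        int i, n = 0;

        while (*s && isspace((unsigned char)*s))
            s++;
        if (*s == '+' || *s == '-' || *s == '=')
            op = *s++;

        while (*s && (*s == '_' || (*s >= 'a' && *s <= 'z')) && n < 31)
            name[n++] = *s++;
        name[n] = '\0';
        if (!n)
            break;

        for (i = 0; demo_flags[i].name; i++) {
            if (!strcmp(name, demo_flags[i].name)) {
                if (op == '-')
                    mask &= ~demo_flags[i].bit;
                else if (op == '=')
                    mask = demo_flags[i].bit;
                else
                    mask |= demo_flags[i].bit;
                break;
            }
        }
    }
    return mask;
}

int main(void)
{
    unsigned mask = 0x00000001;           /* start with "os" set */

    mask = apply_trace_string(mask, "+gc -os");
    printf("mask = 0x%08X\n", mask);      /* prints 0x00000002 */
    return 0;
}
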
++
++
++static int yaffs_proc_write(struct file *file, const char *buf,
++ unsigned long count, void *data)
++{
++ return yaffs_proc_write_trace_options(file, buf, count, data);
++}
++
++/* Stuff to handle installation of file systems */
++struct file_system_to_install {
++ struct file_system_type *fst;
++ int installed;
++};
++
++static struct file_system_to_install fs_to_install[] = {
++ {&yaffs_fs_type, 0},
++ {&yaffs2_fs_type, 0},
++ {NULL, 0}
++};
++
++static int __init init_yaffs_fs(void)
++{
++ int error = 0;
++ struct file_system_to_install *fsinst;
++
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs built " __DATE__ " " __TIME__ " Installing. \n")));
++
++#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR(" \n\n\n\nYAFFS-WARNING CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED selected.\n\n\n\n")));
++#endif
++
++
++
++
++ init_MUTEX(&yaffs_context_lock);
++
++ /* Install the proc_fs entries */
++ my_proc_entry = create_proc_entry("yaffs",
++ S_IRUGO | S_IFREG,
++ YPROC_ROOT);
++
++ if (my_proc_entry) {
++ my_proc_entry->write_proc = yaffs_proc_write;
++ my_proc_entry->read_proc = yaffs_proc_read;
++ my_proc_entry->data = NULL;
++ } else
++ return -ENOMEM;
++
++ debug_proc_entry = create_proc_entry("yaffs_stats",
++ S_IRUGO | S_IFREG,
++ YPROC_ROOT);
++
++ if (debug_proc_entry) {
++ debug_proc_entry->write_proc = NULL;
++ debug_proc_entry->read_proc = yaffs_stats_proc_read;
++ debug_proc_entry->data = NULL;
++ } else
++ return -ENOMEM;
++
++ /* Now add the file system entries */
++
++ fsinst = fs_to_install;
++
++ while (fsinst->fst && !error) {
++ error = register_filesystem(fsinst->fst);
++ if (!error)
++ fsinst->installed = 1;
++ fsinst++;
++ }
++
++ /* Any errors? uninstall */
++ if (error) {
++ fsinst = fs_to_install;
++
++ while (fsinst->fst) {
++ if (fsinst->installed) {
++ unregister_filesystem(fsinst->fst);
++ fsinst->installed = 0;
++ }
++ fsinst++;
++ }
++ }
++
++ return error;
++}
++
++static void __exit exit_yaffs_fs(void)
++{
++
++ struct file_system_to_install *fsinst;
++
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs built " __DATE__ " " __TIME__ " removing. \n")));
++
++ remove_proc_entry("yaffs", YPROC_ROOT);
++ remove_proc_entry("yaffs_stats", YPROC_ROOT);
++
++ fsinst = fs_to_install;
++
++ while (fsinst->fst) {
++ if (fsinst->installed) {
++ unregister_filesystem(fsinst->fst);
++ fsinst->installed = 0;
++ }
++ fsinst++;
++ }
++}
++
++module_init(init_yaffs_fs)
++module_exit(exit_yaffs_fs)
++
++MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
++MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2010");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs1.c
+@@ -0,0 +1,465 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#include "yaffs_yaffs1.h"
++#include "yportenv.h"
++#include "yaffs_trace.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_nand.h"
++
++
++int yaffs1_scan(yaffs_dev_t *dev)
++{
++ yaffs_ext_tags tags;
++ int blk;
++ int blockIterator;
++ int startIterator;
++ int endIterator;
++ int result;
++
++ int chunk;
++ int c;
++ int deleted;
++ yaffs_block_state_t state;
++ yaffs_obj_t *hard_list = NULL;
++ yaffs_block_info_t *bi;
++ __u32 seq_number;
++ yaffs_obj_header *oh;
++ yaffs_obj_t *in;
++ yaffs_obj_t *parent;
++
++ int alloc_failed = 0;
++
++ struct yaffs_shadow_fixer_s *shadowFixerList = NULL;
++
++
++ __u8 *chunkData;
++
++
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("yaffs1_scan starts intstartblk %d intendblk %d..." TENDSTR),
++ dev->internal_start_block, dev->internal_end_block));
++
++ chunkData = yaffs_get_temp_buffer(dev, __LINE__);
++
++ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++ /* Scan all the blocks to determine their state */
++ bi = dev->block_info;
++ for (blk = dev->internal_start_block; blk <= dev->internal_end_block; blk++) {
++ yaffs_clear_chunk_bits(dev, blk);
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++
++ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
++
++ bi->block_state = state;
++ bi->seq_number = seq_number;
++
++ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
++
++ T(YAFFS_TRACE_SCAN_DEBUG,
++ (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
++ state, seq_number));
++
++ if (state == YAFFS_BLOCK_STATE_DEAD) {
++ T(YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("block %d is bad" TENDSTR), blk));
++ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
++ T(YAFFS_TRACE_SCAN_DEBUG,
++ (TSTR("Block empty " TENDSTR)));
++ dev->n_erased_blocks++;
++ dev->n_free_chunks += dev->param.chunks_per_block;
++ }
++ bi++;
++ }
++
++ startIterator = dev->internal_start_block;
++ endIterator = dev->internal_end_block;
++
++ /* For each block.... */
++ for (blockIterator = startIterator; !alloc_failed && blockIterator <= endIterator;
++ blockIterator++) {
++
++ YYIELD();
++
++ YYIELD();
++
++ blk = blockIterator;
++
++ bi = yaffs_get_block_info(dev, blk);
++ state = bi->block_state;
++
++ deleted = 0;
++
++ /* For each chunk in each block that needs scanning....*/
++ for (c = 0; !alloc_failed && c < dev->param.chunks_per_block &&
++ state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
++ /* Read the tags and decide what to do */
++ chunk = blk * dev->param.chunks_per_block + c;
++
++ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
++ &tags);
++
++ /* Let's have a good look at this chunk... */
++
++ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED || tags.is_deleted) {
++ /* YAFFS1 only...
++ * A deleted chunk
++ */
++ deleted++;
++ dev->n_free_chunks++;
++ /*T((" %d %d deleted\n",blk,c)); */
++ } else if (!tags.chunk_used) {
++ /* An unassigned chunk in the block
++ * This means that either the block is empty or
++ * this is the one being allocated from
++ */
++
++ if (c == 0) {
++ /* We're looking at the first chunk in the block so the block is unused */
++ state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ } else {
++ /* this is the block being allocated from */
++ T(YAFFS_TRACE_SCAN,
++ (TSTR
++ (" Allocating from %d %d" TENDSTR),
++ blk, c));
++ state = YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->alloc_block = blk;
++ dev->alloc_page = c;
++ dev->alloc_block_finder = blk;
++ /* Set block finder here to encourage the allocator to go forth from here. */
++
++ }
++
++ dev->n_free_chunks += (dev->param.chunks_per_block - c);
++ } else if (tags.chunk_id > 0) {
++ /* chunk_id > 0 so it is a data chunk... */
++ unsigned int endpos;
++
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ in = yaffs_find_or_create_by_number(dev,
++ tags.
++ obj_id,
++ YAFFS_OBJECT_TYPE_FILE);
++ /* yaffs_put_chunk_in_file checks for a clash (two data chunks with
++ * the same chunk_id).
++ */
++
++ if (!in)
++ alloc_failed = 1;
++
++ if (in) {
++ if (!yaffs_put_chunk_in_file(in, tags.chunk_id, chunk, 1))
++ alloc_failed = 1;
++ }
++
++ endpos =
++ (tags.chunk_id - 1) * dev->data_bytes_per_chunk +
++ tags.n_bytes;
++ if (in &&
++ in->variant_type == YAFFS_OBJECT_TYPE_FILE
++ && in->variant.file_variant.scanned_size <
++ endpos) {
++ in->variant.file_variant.
++ scanned_size = endpos;
++ if (!dev->param.use_header_file_size) {
++ in->variant.file_variant.
++ file_size =
++ in->variant.file_variant.
++ scanned_size;
++ }
++
++ }
++ /* T((" %d %d data %d %d\n",blk,c,tags.obj_id,tags.chunk_id)); */
++ } else {
++ /* chunk_id == 0, so it is an ObjectHeader.
++ * Thus, we read in the object header and make the object
++ */
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ result = yaffs_rd_chunk_tags_nand(dev, chunk,
++ chunkData,
++ NULL);
++
++ oh = (yaffs_obj_header *) chunkData;
++
++ in = yaffs_find_by_number(dev,
++ tags.obj_id);
++ if (in && in->variant_type != oh->type) {
++ /* This should not happen, but somehow
++ * we've ended up with an obj_id that has been reused but not yet
++ * deleted, and worse still it has changed type. Delete the old object.
++ */
++
++ yaffs_del_obj(in);
++
++ in = 0;
++ }
++
++ in = yaffs_find_or_create_by_number(dev,
++ tags.
++ obj_id,
++ oh->type);
++
++ if (!in)
++ alloc_failed = 1;
++
++ if (in && oh->shadows_obj > 0) {
++
++ struct yaffs_shadow_fixer_s *fixer;
++ fixer = YMALLOC(sizeof(struct yaffs_shadow_fixer_s));
++ if (fixer) {
++ fixer->next = shadowFixerList;
++ shadowFixerList = fixer;
++ fixer->obj_id = tags.obj_id;
++ fixer->shadowed_id = oh->shadows_obj;
++ T(YAFFS_TRACE_SCAN,
++ (TSTR
++ (" Shadow fixer: %d shadows %d" TENDSTR),
++ fixer->obj_id, fixer->shadowed_id));
++
++ }
++
++ }
++
++ if (in && in->valid) {
++ /* We have already filled this one. We have a duplicate and need to resolve it. */
++
++ unsigned existingSerial = in->serial;
++ unsigned newSerial = tags.serial_number;
++
++ if (((existingSerial + 1) & 3) == newSerial) {
++ /* Use new one - destroy the existing one */
++ yaffs_chunk_del(dev,
++ in->hdr_chunk,
++ 1, __LINE__);
++ in->valid = 0;
++ } else {
++ /* Use existing - destroy this one. */
++ yaffs_chunk_del(dev, chunk, 1,
++ __LINE__);
++ }
++ }
++
++ if (in && !in->valid &&
++ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) {
++ /* We only load some info, don't fiddle with directory structure */
++ in->valid = 1;
++ in->variant_type = oh->type;
++
++ in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++ in->win_atime[0] = oh->win_atime[0];
++ in->win_ctime[0] = oh->win_ctime[0];
++ in->win_mtime[0] = oh->win_mtime[0];
++ in->win_atime[1] = oh->win_atime[1];
++ in->win_ctime[1] = oh->win_ctime[1];
++ in->win_mtime[1] = oh->win_mtime[1];
++#else
++ in->yst_uid = oh->yst_uid;
++ in->yst_gid = oh->yst_gid;
++ in->yst_atime = oh->yst_atime;
++ in->yst_mtime = oh->yst_mtime;
++ in->yst_ctime = oh->yst_ctime;
++ in->yst_rdev = oh->yst_rdev;
++#endif
++ in->hdr_chunk = chunk;
++ in->serial = tags.serial_number;
++
++ } else if (in && !in->valid) {
++ /* we need to load this info */
++
++ in->valid = 1;
++ in->variant_type = oh->type;
++
++ in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++ in->win_atime[0] = oh->win_atime[0];
++ in->win_ctime[0] = oh->win_ctime[0];
++ in->win_mtime[0] = oh->win_mtime[0];
++ in->win_atime[1] = oh->win_atime[1];
++ in->win_ctime[1] = oh->win_ctime[1];
++ in->win_mtime[1] = oh->win_mtime[1];
++#else
++ in->yst_uid = oh->yst_uid;
++ in->yst_gid = oh->yst_gid;
++ in->yst_atime = oh->yst_atime;
++ in->yst_mtime = oh->yst_mtime;
++ in->yst_ctime = oh->yst_ctime;
++ in->yst_rdev = oh->yst_rdev;
++#endif
++ in->hdr_chunk = chunk;
++ in->serial = tags.serial_number;
++
++ yaffs_set_obj_name_from_oh(in, oh);
++ in->dirty = 0;
++
++ /* directory stuff...
++ * hook up to parent
++ */
++
++ parent =
++ yaffs_find_or_create_by_number
++ (dev, oh->parent_obj_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ if (!parent)
++ alloc_failed = 1;
++ if (parent && parent->variant_type ==
++ YAFFS_OBJECT_TYPE_UNKNOWN) {
++ /* Set up as a directory */
++ parent->variant_type =
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ YINIT_LIST_HEAD(&parent->variant.
++ dir_variant.
++ children);
++ } else if (!parent || parent->variant_type !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
++ /* Hoosterman, another problem....
++ * We're trying to use a non-directory as a directory
++ */
++
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++ TENDSTR)));
++ parent = dev->lost_n_found;
++ }
++
++ yaffs_add_obj_to_dir(parent, in);
++
++ if (0 && (parent == dev->del_dir ||
++ parent == dev->unlinked_dir)) {
++ in->deleted = 1; /* If it is unlinked at start up then it wants deleting */
++ dev->n_deleted_files++;
++ }
++ /* Note re hardlinks.
++ * Since we might scan a hardlink before its equivalent object is scanned
++ * we put them all in a list.
++ * After scanning is complete, we should have all the objects, so we run through this
++ * list and fix up all the chains.
++ */
++
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* Todo got a problem */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (dev->param.use_header_file_size)
++
++ in->variant.file_variant.
++ file_size =
++ oh->file_size;
++
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ in->variant.hardlink_variant.
++ equiv_id =
++ oh->equiv_id;
++ in->hard_links.next =
++ (struct ylist_head *)
++ hard_list;
++ hard_list = in;
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ in->variant.symlink_variant.alias =
++ yaffs_clone_str(oh->alias);
++ if (!in->variant.symlink_variant.alias)
++ alloc_failed = 1;
++ break;
++ }
++
++ }
++ }
++ }
++
++ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++ /* If we got this far while scanning, then the block is fully allocated.*/
++ state = YAFFS_BLOCK_STATE_FULL;
++ }
++
++ if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
++ /* If the block was partially allocated then treat it as fully allocated.*/
++ state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
++ }
++
++ bi->block_state = state;
++
++ /* Now let's see if it was dirty */
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
++ yaffs_block_became_dirty(dev, blk);
++ }
++
++ }
++
++
++ /* Ok, we've done all the scanning.
++ * Fix up the hard link chains.
++ * We should now have scanned all the objects, so it's time to add these
++ * hardlinks.
++ */
++
++ yaffs_link_fixup(dev, hard_list);
++
++ /* Fix up any shadowed objects */
++ {
++ struct yaffs_shadow_fixer_s *fixer;
++ yaffs_obj_t *obj;
++
++ while (shadowFixerList) {
++ fixer = shadowFixerList;
++ shadowFixerList = fixer->next;
++ /* Complete the rename transaction by deleting the shadowed object
++ * then setting the object header to unshadowed.
++ */
++ obj = yaffs_find_by_number(dev, fixer->shadowed_id);
++ if (obj)
++ yaffs_del_obj(obj);
++
++ obj = yaffs_find_by_number(dev, fixer->obj_id);
++
++ if (obj)
++ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
++
++ YFREE(fixer);
++ }
++ }
++
++ yaffs_release_temp_buffer(dev, chunkData, __LINE__);
++
++ if (alloc_failed)
++ return YAFFS_FAIL;
++
++ T(YAFFS_TRACE_SCAN, (TSTR("yaffs1_scan ends" TENDSTR)));
++
++
++ return YAFFS_OK;
++}
++
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs1.h
+@@ -0,0 +1,22 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_YAFFS1_H__
++#define __YAFFS_YAFFS1_H__
++
++#include "yaffs_guts.h"
++int yaffs1_scan(yaffs_dev_t *dev);
++
++#endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs2.c
+@@ -0,0 +1,1540 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yaffs_yaffs2.h"
++#include "yaffs_checkptrw.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_qsort.h"
++#include "yaffs_nand.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_verify.h"
++
++/*
++ * Checkpoints are really no benefit on very small partitions.
++ *
++ * To save space on small partitions don't bother with checkpoints unless
++ * the partition is at least this big.
++ */
++#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
++
++#define YAFFS_SMALL_HOLE_THRESHOLD 4
++
++
++/*
++ * Oldest Dirty Sequence Number handling.
++ */
++
++/* yaffs_calc_oldest_dirty_seq()
++ * yaffs2_find_oldest_dirty_seq()
++ * Calculate the oldest dirty sequence number if we don't know it.
++ */
++void yaffs_calc_oldest_dirty_seq(yaffs_dev_t *dev)
++{
++ int i;
++ unsigned seq;
++ unsigned block_no = 0;
++ yaffs_block_info_t *b;
++
++ if(!dev->param.is_yaffs2)
++ return;
++
++ /* Find the oldest dirty sequence number. */
++ seq = dev->seq_number + 1;
++ b = dev->block_info;
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
++ (b->pages_in_use - b->soft_del_pages) < dev->param.chunks_per_block &&
++ b->seq_number < seq) {
++ seq = b->seq_number;
++ block_no = i;
++ }
++ b++;
++ }
++
++ if(block_no){
++ dev->oldest_dirty_seq = seq;
++ dev->oldest_dirty_block = block_no;
++ }
++
++}
++
++
++void yaffs2_find_oldest_dirty_seq(yaffs_dev_t *dev)
++{
++ if(!dev->param.is_yaffs2)
++ return;
++
++ if(!dev->oldest_dirty_seq)
++ yaffs_calc_oldest_dirty_seq(dev);
++}
++
++/*
++ * yaffs_clear_oldest_dirty_seq()
++ * Called when a block is erased or marked bad (i.e. when its seq_number
++ * becomes invalid). If the value matches the oldest then we clear
++ * dev->oldest_dirty_seq to force its recomputation.
++ */
++void yaffs2_clear_oldest_dirty_seq(yaffs_dev_t *dev, yaffs_block_info_t *bi)
++{
++
++ if(!dev->param.is_yaffs2)
++ return;
++
++ if(!bi || bi->seq_number == dev->oldest_dirty_seq){
++ dev->oldest_dirty_seq = 0;
++ dev->oldest_dirty_block = 0;
++ }
++}
++
++/*
++ * yaffs2_update_oldest_dirty_seq()
++ * Update the oldest dirty sequence number whenever we dirty a block.
++ * Only do this if the oldest_dirty_seq is actually being tracked.
++ */
++void yaffs2_update_oldest_dirty_seq(yaffs_dev_t *dev, unsigned block_no, yaffs_block_info_t *bi)
++{
++ if(!dev->param.is_yaffs2)
++ return;
++
++ if(dev->oldest_dirty_seq){
++ if(dev->oldest_dirty_seq > bi->seq_number){
++ dev->oldest_dirty_seq = bi->seq_number;
++ dev->oldest_dirty_block = block_no;
++ }
++ }
++}
++
++int yaffs_block_ok_for_gc(yaffs_dev_t *dev,
++ yaffs_block_info_t *bi)
++{
++
++ if (!dev->param.is_yaffs2)
++ return 1; /* disqualification only applies to yaffs2. */
++
++ if (!bi->has_shrink_hdr)
++ return 1; /* can gc */
++
++ yaffs2_find_oldest_dirty_seq(dev);
++
++ /* Can't do gc of this block if there are any blocks older than this one that have
++ * discarded pages.
++ */
++ return (bi->seq_number <= dev->oldest_dirty_seq);
++}
++
++/*
++ * yaffs2_find_refresh_block()
++ * periodically finds the oldest full block by sequence number for refreshing.
++ * Only for yaffs2.
++ */
++__u32 yaffs2_find_refresh_block(yaffs_dev_t *dev)
++{
++ __u32 b ;
++
++ __u32 oldest = 0;
++ __u32 oldestSequence = 0;
++
++ yaffs_block_info_t *bi;
++
++ if(!dev->param.is_yaffs2)
++ return oldest;
++
++ /*
++ * If refresh period < 10 then refreshing is disabled.
++ */
++ if(dev->param.refresh_period < 10)
++ return oldest;
++
++ /*
++ * Fix broken values.
++ */
++ if(dev->refresh_skip > dev->param.refresh_period)
++ dev->refresh_skip = dev->param.refresh_period;
++
++ if(dev->refresh_skip > 0)
++ return oldest;
++
++ /*
++ * Refresh skip is now zero.
++ * We'll do a refresh this time around....
++ * Update the refresh skip and find the oldest block.
++ */
++ dev->refresh_skip = dev->param.refresh_period;
++ dev->refresh_count++;
++ bi = dev->block_info;
++ for (b = dev->internal_start_block; b <=dev->internal_end_block; b++){
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL){
++
++ if(oldest < 1 ||
++ bi->seq_number < oldestSequence){
++ oldest = b;
++ oldestSequence = bi->seq_number;
++ }
++ }
++ bi++;
++ }
++
++ if (oldest > 0) {
++ T(YAFFS_TRACE_GC,
++ (TSTR("GC refresh count %d selected block %d with seq_number %d" TENDSTR),
++ dev->refresh_count, oldest, oldestSequence));
++ }
++
++ return oldest;
++}
++
++int yaffs2_checkpt_required(yaffs_dev_t *dev)
++{
++ int nblocks;
++
++ if(!dev->param.is_yaffs2)
++ return 0;
++
++ nblocks = dev->internal_end_block - dev->internal_start_block + 1 ;
++
++ return !dev->param.skip_checkpt_wr &&
++ !dev->read_only &&
++ (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
++}
++
++int yaffs_calc_checkpt_blocks_required(yaffs_dev_t *dev)
++{
++ int retval;
++
++ if(!dev->param.is_yaffs2)
++ return 0;
++
++ if (!dev->checkpoint_blocks_required &&
++ yaffs2_checkpt_required(dev)){
++ /* Not a valid value so recalculate */
++ int n_bytes = 0;
++ int nBlocks;
++ int devBlocks = (dev->param.end_block - dev->param.start_block + 1);
++
++ n_bytes += sizeof(yaffs_checkpt_validty_t);
++ n_bytes += sizeof(yaffs_checkpt_dev_t);
++ n_bytes += devBlocks * sizeof(yaffs_block_info_t);
++ n_bytes += devBlocks * dev->chunk_bit_stride;
++ n_bytes += (sizeof(yaffs_checkpt_obj_t) + sizeof(__u32)) * (dev->n_obj);
++ n_bytes += (dev->tnode_size + sizeof(__u32)) * (dev->n_tnodes);
++ n_bytes += sizeof(yaffs_checkpt_validty_t);
++ n_bytes += sizeof(__u32); /* checksum*/
++
++ /* Round up (1 block) and add 2 spare blocks to allow for some bad blocks, so add 3 */
++
++ nBlocks = (n_bytes/(dev->data_bytes_per_chunk * dev->param.chunks_per_block)) + 3;
++
++ dev->checkpoint_blocks_required = nBlocks;
++ }
++
++ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
++ if(retval < 0)
++ retval = 0;
++ return retval;
++}
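
For a feel of the numbers involved, the estimate above boils down to dividing the total checkpoint byte count by the block payload (bytes per chunk times chunks per block) and adding three blocks of slack. A minimal worked example follows; every size in it is assumed purely for illustration and does not come from this patch.

/* Rough worked example of the block estimate above, with assumed sizes. */
#include <stdio.h>

int main(void)
{
    /* All of these numbers are assumptions for illustration only. */
    int data_bytes_per_chunk = 2048;
    int chunks_per_block = 64;
    int n_bytes = 300000;    /* pretend total checkpoint payload */

    /* Integer division rounds down, so +3 covers rounding plus two
     * spare blocks for possible bad blocks, as in the code above. */
    int n_blocks = n_bytes / (data_bytes_per_chunk * chunks_per_block) + 3;

    printf("checkpoint needs about %d blocks\n", n_blocks);    /* prints 5 */
    return 0;
}
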
++
++/*--------------------- Checkpointing --------------------*/
++
++
++static int yaffs2_wr_checkpt_validity_marker(yaffs_dev_t *dev, int head)
++{
++ yaffs_checkpt_validty_t cp;
++
++ memset(&cp, 0, sizeof(cp));
++
++ cp.struct_type = sizeof(cp);
++ cp.magic = YAFFS_MAGIC;
++ cp.version = YAFFS_CHECKPOINT_VERSION;
++ cp.head = (head) ? 1 : 0;
++
++ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ?
++ 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_validty_marker(yaffs_dev_t *dev, int head)
++{
++ yaffs_checkpt_validty_t cp;
++ int ok;
++
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ if (ok)
++ ok = (cp.struct_type == sizeof(cp)) &&
++ (cp.magic == YAFFS_MAGIC) &&
++ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
++ (cp.head == ((head) ? 1 : 0));
++ return ok ? 1 : 0;
++}
++
++static void yaffs2_dev_to_checkpt_dev(yaffs_checkpt_dev_t *cp,
++ yaffs_dev_t *dev)
++{
++ cp->n_erased_blocks = dev->n_erased_blocks;
++ cp->alloc_block = dev->alloc_block;
++ cp->alloc_page = dev->alloc_page;
++ cp->n_free_chunks = dev->n_free_chunks;
++
++ cp->n_deleted_files = dev->n_deleted_files;
++ cp->n_unlinked_files = dev->n_unlinked_files;
++ cp->n_bg_deletions = dev->n_bg_deletions;
++ cp->seq_number = dev->seq_number;
++
++}
++
++static void yaffs_checkpt_dev_to_dev(yaffs_dev_t *dev,
++ yaffs_checkpt_dev_t *cp)
++{
++ dev->n_erased_blocks = cp->n_erased_blocks;
++ dev->alloc_block = cp->alloc_block;
++ dev->alloc_page = cp->alloc_page;
++ dev->n_free_chunks = cp->n_free_chunks;
++
++ dev->n_deleted_files = cp->n_deleted_files;
++ dev->n_unlinked_files = cp->n_unlinked_files;
++ dev->n_bg_deletions = cp->n_bg_deletions;
++ dev->seq_number = cp->seq_number;
++}
++
++
++static int yaffs2_wr_checkpt_dev(yaffs_dev_t *dev)
++{
++ yaffs_checkpt_dev_t cp;
++ __u32 n_bytes;
++ __u32 nBlocks = (dev->internal_end_block - dev->internal_start_block + 1);
++
++ int ok;
++
++ /* Write device runtime values*/
++ yaffs2_dev_to_checkpt_dev(&cp, dev);
++ cp.struct_type = sizeof(cp);
++
++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ /* Write block info */
++ if (ok) {
++ n_bytes = nBlocks * sizeof(yaffs_block_info_t);
++ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) == n_bytes);
++ }
++
++ /* Write chunk bits */
++ if (ok) {
++ n_bytes = nBlocks * dev->chunk_bit_stride;
++ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) == n_bytes);
++ }
++ return ok ? 1 : 0;
++
++}
++
++static int yaffs2_rd_checkpt_dev(yaffs_dev_t *dev)
++{
++ yaffs_checkpt_dev_t cp;
++ __u32 n_bytes;
++ __u32 nBlocks = (dev->internal_end_block - dev->internal_start_block + 1);
++
++ int ok;
++
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (!ok)
++ return 0;
++
++ if (cp.struct_type != sizeof(cp))
++ return 0;
++
++
++ yaffs_checkpt_dev_to_dev(dev, &cp);
++
++ n_bytes = nBlocks * sizeof(yaffs_block_info_t);
++
++ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
++
++ if (!ok)
++ return 0;
++ n_bytes = nBlocks * dev->chunk_bit_stride;
++
++ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
++
++ return ok ? 1 : 0;
++}
++
++static void yaffs2_obj_checkpt_obj(yaffs_checkpt_obj_t *cp,
++ yaffs_obj_t *obj)
++{
++
++ cp->obj_id = obj->obj_id;
++ cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
++ cp->hdr_chunk = obj->hdr_chunk;
++ cp->variant_type = obj->variant_type;
++ cp->deleted = obj->deleted;
++ cp->soft_del = obj->soft_del;
++ cp->unlinked = obj->unlinked;
++ cp->fake = obj->fake;
++ cp->rename_allowed = obj->rename_allowed;
++ cp->unlink_allowed = obj->unlink_allowed;
++ cp->serial = obj->serial;
++ cp->n_data_chunks = obj->n_data_chunks;
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
++ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
++}
++
++static int taffs2_checkpt_obj_to_obj(yaffs_obj_t *obj, yaffs_checkpt_obj_t *cp)
++{
++
++ yaffs_obj_t *parent;
++
++ if (obj->variant_type != cp->variant_type) {
++ T(YAFFS_TRACE_ERROR, (TSTR("Checkpoint read object %d type %d "
++ TCONT("chunk %d does not match existing object type %d")
++ TENDSTR), cp->obj_id, cp->variant_type, cp->hdr_chunk,
++ obj->variant_type));
++ return 0;
++ }
++
++ obj->obj_id = cp->obj_id;
++
++ if (cp->parent_id)
++ parent = yaffs_find_or_create_by_number(
++ obj->my_dev,
++ cp->parent_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ else
++ parent = NULL;
++
++ if (parent) {
++ if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Checkpoint read object %d parent %d type %d"
++ TCONT(" chunk %d Parent type, %d, not directory")
++ TENDSTR),
++ cp->obj_id, cp->parent_id, cp->variant_type,
++ cp->hdr_chunk, parent->variant_type));
++ return 0;
++ }
++ yaffs_add_obj_to_dir(parent, obj);
++ }
++
++ obj->hdr_chunk = cp->hdr_chunk;
++ obj->variant_type = cp->variant_type;
++ obj->deleted = cp->deleted;
++ obj->soft_del = cp->soft_del;
++ obj->unlinked = cp->unlinked;
++ obj->fake = cp->fake;
++ obj->rename_allowed = cp->rename_allowed;
++ obj->unlink_allowed = cp->unlink_allowed;
++ obj->serial = cp->serial;
++ obj->n_data_chunks = cp->n_data_chunks;
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
++ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
++
++ if (obj->hdr_chunk > 0)
++ obj->lazy_loaded = 1;
++ return 1;
++}
++
++
++
++static int yaffs2_checkpt_tnode_worker(yaffs_obj_t *in, yaffs_tnode_t *tn,
++ __u32 level, int chunk_offset)
++{
++ int i;
++ yaffs_dev_t *dev = in->my_dev;
++ int ok = 1;
++
++ if (tn) {
++ if (level > 0) {
++
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
++ if (tn->internal[i]) {
++ ok = yaffs2_checkpt_tnode_worker(in,
++ tn->internal[i],
++ level - 1,
++ (chunk_offset<<YAFFS_TNODES_INTERNAL_BITS) + i);
++ }
++ }
++ } else if (level == 0) {
++ __u32 baseOffset = chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
++ ok = (yaffs2_checkpt_wr(dev, &baseOffset, sizeof(baseOffset)) == sizeof(baseOffset));
++ if (ok)
++ ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) == dev->tnode_size);
++ }
++ }
++
++ return ok;
++
++}
++
++static int yaffs2_wr_checkpt_tnodes(yaffs_obj_t *obj)
++{
++ __u32 endMarker = ~0;
++ int ok = 1;
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
++ ok = yaffs2_checkpt_tnode_worker(obj,
++ obj->variant.file_variant.top,
++ obj->variant.file_variant.top_level,
++ 0);
++ if (ok)
++ ok = (yaffs2_checkpt_wr(obj->my_dev, &endMarker, sizeof(endMarker)) ==
++ sizeof(endMarker));
++ }
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_tnodes(yaffs_obj_t *obj)
++{
++ __u32 baseChunk;
++ int ok = 1;
++ yaffs_dev_t *dev = obj->my_dev;
++ yaffs_file_s *fileStructPtr = &obj->variant.file_variant;
++ yaffs_tnode_t *tn;
++ int nread = 0;
++
++ ok = (yaffs2_checkpt_rd(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++
++ while (ok && (~baseChunk)) {
++ nread++;
++ /* Read level 0 tnode */
++
++
++ tn = yaffs_get_tnode(dev);
++ if (tn){
++ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) == dev->tnode_size);
++ } else
++ ok = 0;
++
++ if (tn && ok)
++ ok = yaffs_add_find_tnode_0(dev,
++ fileStructPtr,
++ baseChunk,
++ tn) ? 1 : 0;
++
++ if (ok)
++ ok = (yaffs2_checkpt_rd(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++
++ }
++
++ T(YAFFS_TRACE_CHECKPOINT, (
++ TSTR("Checkpoint read tnodes %d records, last %d. ok %d" TENDSTR),
++ nread, baseChunk, ok));
++
++ return ok ? 1 : 0;
++}
++
++
++static int yaffs2_wr_checkpt_objs(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj;
++ yaffs_checkpt_obj_t cp;
++ int i;
++ int ok = 1;
++ struct ylist_head *lh;
++
++
++ /* Iterate through the objects in each hash entry,
++ * dumping them to the checkpointing stream.
++ */
++
++ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
++ ylist_for_each(lh, &dev->obj_bucket[i].list) {
++ if (lh) {
++ obj = ylist_entry(lh, yaffs_obj_t, hash_link);
++ if (!obj->defered_free) {
++ yaffs2_obj_checkpt_obj(&cp, obj);
++ cp.struct_type = sizeof(cp);
++
++ T(YAFFS_TRACE_CHECKPOINT, (
++ TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %p" TENDSTR),
++ cp.obj_id, cp.parent_id, cp.variant_type, cp.hdr_chunk, obj));
++
++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ if (ok && obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ ok = yaffs2_wr_checkpt_tnodes(obj);
++ }
++ }
++ }
++ }
++
++ /* Dump end of list */
++ memset(&cp, 0xFF, sizeof(yaffs_checkpt_obj_t));
++ cp.struct_type = sizeof(cp);
++
++ if (ok)
++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_objs(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj;
++ yaffs_checkpt_obj_t cp;
++ int ok = 1;
++ int done = 0;
++ yaffs_obj_t *hard_list = NULL;
++
++ while (ok && !done) {
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (cp.struct_type != sizeof(cp)) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("struct size %d instead of %d ok %d"TENDSTR),
++ cp.struct_type, (int)sizeof(cp), ok));
++ ok = 0;
++ }
++
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR),
++ cp.obj_id, cp.parent_id, cp.variant_type, cp.hdr_chunk));
++
++ if (ok && cp.obj_id == ~0)
++ done = 1;
++ else if (ok) {
++ obj = yaffs_find_or_create_by_number(dev, cp.obj_id, cp.variant_type);
++ if (obj) {
++ ok = taffs2_checkpt_obj_to_obj(obj, &cp);
++ if (!ok)
++ break;
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
++ ok = yaffs2_rd_checkpt_tnodes(obj);
++ } else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
++ obj->hard_links.next =
++ (struct ylist_head *) hard_list;
++ hard_list = obj;
++ }
++ } else
++ ok = 0;
++ }
++ }
++
++ if (ok)
++ yaffs_link_fixup(dev, hard_list);
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_wr_checkpt_sum(yaffs_dev_t *dev)
++{
++ __u32 checkpt_sum;
++ int ok;
++
++ yaffs2_get_checkpt_sum(dev, &checkpt_sum);
++
++ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) == sizeof(checkpt_sum));
++
++ if (!ok)
++ return 0;
++
++ return 1;
++}
++
++static int yaffs2_rd_checkpt_sum(yaffs_dev_t *dev)
++{
++ __u32 checkpt_sum0;
++ __u32 checkpt_sum1;
++ int ok;
++
++ yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
++
++ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) == sizeof(checkpt_sum1));
++
++ if (!ok)
++ return 0;
++
++ if (checkpt_sum0 != checkpt_sum1)
++ return 0;
++
++ return 1;
++}
++
++
++static int yaffs2_wr_checkpt_data(yaffs_dev_t *dev)
++{
++ int ok = 1;
++
++ if (!yaffs2_checkpt_required(dev)) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint write" TENDSTR)));
++ ok = 0;
++ }
++
++ if (ok)
++ ok = yaffs2_checkpt_open(dev, 1);
++
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
++ ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint device" TENDSTR)));
++ ok = yaffs2_wr_checkpt_dev(dev);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint objects" TENDSTR)));
++ ok = yaffs2_wr_checkpt_objs(dev);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
++ ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
++ }
++
++ if (ok)
++ ok = yaffs2_wr_checkpt_sum(dev);
++
++ if (!yaffs_checkpt_close(dev))
++ ok = 0;
++
++ if (ok)
++ dev->is_checkpointed = 1;
++ else
++ dev->is_checkpointed = 0;
++
++ return dev->is_checkpointed;
++}
++
++static int yaffs2_rd_checkpt_data(yaffs_dev_t *dev)
++{
++ int ok = 1;
++
++ if(!dev->param.is_yaffs2)
++ ok = 0;
++
++ if (ok && dev->param.skip_checkpt_rd) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint read" TENDSTR)));
++ ok = 0;
++ }
++
++ if (ok)
++ ok = yaffs2_checkpt_open(dev, 0); /* open for read */
++
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
++ ok = yaffs2_rd_checkpt_validty_marker(dev, 1);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint device" TENDSTR)));
++ ok = yaffs2_rd_checkpt_dev(dev);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint objects" TENDSTR)));
++ ok = yaffs2_rd_checkpt_objs(dev);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
++ ok = yaffs2_rd_checkpt_validty_marker(dev, 0);
++ }
++
++ if (ok) {
++ ok = yaffs2_rd_checkpt_sum(dev);
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint checksum %d" TENDSTR), ok));
++ }
++
++ if (!yaffs_checkpt_close(dev))
++ ok = 0;
++
++ if (ok)
++ dev->is_checkpointed = 1;
++ else
++ dev->is_checkpointed = 0;
++
++ return ok ? 1 : 0;
++
++}
++
++void yaffs2_checkpt_invalidate(yaffs_dev_t *dev)
++{
++ if (dev->is_checkpointed ||
++ dev->blocks_in_checkpt > 0) {
++ dev->is_checkpointed = 0;
++ yaffs2_checkpt_invalidate_stream(dev);
++ }
++ if (dev->param.sb_dirty_fn)
++ dev->param.sb_dirty_fn(dev);
++}
++
++
++int yaffs_checkpoint_save(yaffs_dev_t *dev)
++{
++
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("save entry: is_checkpointed %d"TENDSTR), dev->is_checkpointed));
++
++ yaffs_verify_objects(dev);
++ yaffs_verify_blocks(dev);
++ yaffs_verify_free_chunks(dev);
++
++ if (!dev->is_checkpointed) {
++ yaffs2_checkpt_invalidate(dev);
++ yaffs2_wr_checkpt_data(dev);
++ }
++
++ T(YAFFS_TRACE_ALWAYS, (TSTR("save exit: is_checkpointed %d"TENDSTR), dev->is_checkpointed));
++
++ return dev->is_checkpointed;
++}
++
++int yaffs2_checkpt_restore(yaffs_dev_t *dev)
++{
++ int retval;
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore entry: is_checkpointed %d"TENDSTR), dev->is_checkpointed));
++
++ retval = yaffs2_rd_checkpt_data(dev);
++
++ if (dev->is_checkpointed) {
++ yaffs_verify_objects(dev);
++ yaffs_verify_blocks(dev);
++ yaffs_verify_free_chunks(dev);
++ }
++
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore exit: is_checkpointed %d"TENDSTR), dev->is_checkpointed));
++
++ return retval;
++}
++
++int yaffs2_handle_hole(yaffs_obj_t *obj, loff_t new_size)
++{
++ /* If new_size > oldFileSize,
++ * we're going to be writing a hole.
++ * If the hole is small then write zeros, otherwise write a start-of-hole marker.
++ */
++
++
++ loff_t oldFileSize;
++ int increase;
++ int smallHole ;
++ int result = YAFFS_OK;
++ yaffs_dev_t *dev = NULL;
++
++ __u8 *localBuffer = NULL;
++
++ int smallIncreaseOk = 0;
++
++ if(!obj)
++ return YAFFS_FAIL;
++
++ if(obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
++ return YAFFS_FAIL;
++
++ dev = obj->my_dev;
++
++ /* Bail out if not yaffs2 mode */
++ if(!dev->param.is_yaffs2)
++ return YAFFS_OK;
++
++ oldFileSize = obj->variant.file_variant.file_size;
++
++ if (new_size <= oldFileSize)
++ return YAFFS_OK;
++
++ increase = new_size - oldFileSize;
++
++ if(increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
++ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
++ smallHole = 1;
++ else
++ smallHole = 0;
++
++ if(smallHole)
++ localBuffer= yaffs_get_temp_buffer(dev, __LINE__);
++
++ if(localBuffer){
++ /* fill hole with zero bytes */
++ int pos = oldFileSize;
++ int thisWrite;
++ int written;
++ memset(localBuffer,0,dev->data_bytes_per_chunk);
++ smallIncreaseOk = 1;
++
++ while(increase > 0 && smallIncreaseOk){
++ thisWrite = increase;
++ if(thisWrite > dev->data_bytes_per_chunk)
++ thisWrite = dev->data_bytes_per_chunk;
++ written = yaffs_do_file_wr(obj,localBuffer,pos,thisWrite,0);
++ if(written == thisWrite){
++ pos += thisWrite;
++ increase -= thisWrite;
++ } else
++ smallIncreaseOk = 0;
++ }
++
++ yaffs_release_temp_buffer(dev,localBuffer,__LINE__);
++
++ /* If we were out of space then reverse any chunks we've added */
++ if(!smallIncreaseOk)
++ yaffs_resize_file_down(obj, oldFileSize);
++ }
++
++ if (!smallIncreaseOk &&
++ obj->parent &&
++ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
++ obj->parent->obj_id != YAFFS_OBJECTID_DELETED){
++ /* Write a hole start header with the old file size */
++ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
++ }
++
++ return result;
++
++}
++
++
++typedef struct {
++ int seq;
++ int block;
++} yaffs_BlockIndex;
++
++
++static int yaffs2_ybicmp(const void *a, const void *b)
++{
++ register int aseq = ((yaffs_BlockIndex *)a)->seq;
++ register int bseq = ((yaffs_BlockIndex *)b)->seq;
++ register int ablock = ((yaffs_BlockIndex *)a)->block;
++ register int bblock = ((yaffs_BlockIndex *)b)->block;
++ if (aseq == bseq)
++ return ablock - bblock;
++ else
++ return aseq - bseq;
++}
++
++int yaffs2_scan_backwards(yaffs_dev_t *dev)
++{
++ yaffs_ext_tags tags;
++ int blk;
++ int blockIterator;
++ int startIterator;
++ int endIterator;
++ int nBlocksToScan = 0;
++
++ int chunk;
++ int result;
++ int c;
++ int deleted;
++ yaffs_block_state_t state;
++ yaffs_obj_t *hard_list = NULL;
++ yaffs_block_info_t *bi;
++ __u32 seq_number;
++ yaffs_obj_header *oh;
++ yaffs_obj_t *in;
++ yaffs_obj_t *parent;
++ int nBlocks = dev->internal_end_block - dev->internal_start_block + 1;
++ int itsUnlinked;
++ __u8 *chunkData;
++
++ int file_size;
++ int is_shrink;
++ int foundChunksInBlock;
++ int equiv_id;
++ int alloc_failed = 0;
++
++
++ yaffs_BlockIndex *blockIndex = NULL;
++ int altBlockIndex = 0;
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR
++ ("yaffs2_scan_backwards starts intstartblk %d intendblk %d..."
++ TENDSTR), dev->internal_start_block, dev->internal_end_block));
++
++
++ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++ blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
++
++ if (!blockIndex) {
++ blockIndex = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockIndex));
++ altBlockIndex = 1;
++ }
++
++ if (!blockIndex) {
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("yaffs2_scan_backwards() could not allocate block index!" TENDSTR)));
++ return YAFFS_FAIL;
++ }
++
++ dev->blocks_in_checkpt = 0;
++
++ chunkData = yaffs_get_temp_buffer(dev, __LINE__);
++
++ /* Scan all the blocks to determine their state */
++ bi = dev->block_info;
++ for (blk = dev->internal_start_block; blk <= dev->internal_end_block; blk++) {
++ yaffs_clear_chunk_bits(dev, blk);
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++
++ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
++
++ bi->block_state = state;
++ bi->seq_number = seq_number;
++
++ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
++ bi->block_state = state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
++
++ T(YAFFS_TRACE_SCAN_DEBUG,
++ (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
++ state, seq_number));
++
++
++ if (state == YAFFS_BLOCK_STATE_CHECKPOINT) {
++ dev->blocks_in_checkpt++;
++
++ } else if (state == YAFFS_BLOCK_STATE_DEAD) {
++ T(YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("block %d is bad" TENDSTR), blk));
++ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
++ T(YAFFS_TRACE_SCAN_DEBUG,
++ (TSTR("Block empty " TENDSTR)));
++ dev->n_erased_blocks++;
++ dev->n_free_chunks += dev->param.chunks_per_block;
++ } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++
++ /* Determine the highest sequence number */
++ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
++ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
++
++ blockIndex[nBlocksToScan].seq = seq_number;
++ blockIndex[nBlocksToScan].block = blk;
++
++ nBlocksToScan++;
++
++ if (seq_number >= dev->seq_number)
++ dev->seq_number = seq_number;
++ } else {
++ /* TODO: Nasty sequence number! */
++ T(YAFFS_TRACE_SCAN,
++ (TSTR
++ ("Block scanning block %d has bad sequence number %d"
++ TENDSTR), blk, seq_number));
++
++ }
++ }
++ bi++;
++ }
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("%d blocks to be sorted..." TENDSTR), nBlocksToScan));
++
++
++
++ YYIELD();
++
++ /* Sort the blocks by sequence number */
++ yaffs_qsort(blockIndex, nBlocksToScan, sizeof(yaffs_BlockIndex), yaffs2_ybicmp);
++
++ YYIELD();
++
++ T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR)));
++
++ /* Now scan the blocks looking at the data. */
++ startIterator = 0;
++ endIterator = nBlocksToScan - 1;
++ T(YAFFS_TRACE_SCAN_DEBUG,
++ (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan));
++
++ /* For each block.... backwards */
++ for (blockIterator = endIterator; !alloc_failed && blockIterator >= startIterator;
++ blockIterator--) {
++ /* Cooperative multitasking! This loop can run for so
++ long that watchdog timers expire. */
++ YYIELD();
++
++ /* get the block to scan in the correct order */
++ blk = blockIndex[blockIterator].block;
++
++ bi = yaffs_get_block_info(dev, blk);
++
++
++ state = bi->block_state;
++
++ deleted = 0;
++
++ /* For each chunk in each block that needs scanning.... */
++ foundChunksInBlock = 0;
++ for (c = dev->param.chunks_per_block - 1;
++ !alloc_failed && c >= 0 &&
++ (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++ state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
++ /* Scan backwards...
++ * Read the tags and decide what to do
++ */
++
++ chunk = blk * dev->param.chunks_per_block + c;
++
++ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
++ &tags);
++
++ /* Let's have a good look at this chunk... */
++
++ if (!tags.chunk_used) {
++ /* An unassigned chunk in the block.
++ * If there are used chunks after this one, then
++ * it is a chunk that was skipped due to failing the erased
++ * check. Just skip it so that it can be deleted.
++ * But, more typically, we get here when this is an unallocated
++ * chunk, and this means that either the block is empty or
++ * this is the one being allocated from.
++ */
++
++ if (foundChunksInBlock) {
++ /* This is a chunk that was skipped due to failing the erased check */
++ } else if (c == 0) {
++ /* We're looking at the first chunk in the block so the block is unused */
++ state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ } else {
++ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++ state == YAFFS_BLOCK_STATE_ALLOCATING) {
++ if (dev->seq_number == bi->seq_number) {
++ /* this is the block being allocated from */
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR
++ (" Allocating from %d %d"
++ TENDSTR), blk, c));
++
++ state = YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->alloc_block = blk;
++ dev->alloc_page = c;
++ dev->alloc_block_finder = blk;
++ } else {
++ /* This is a partially written block that is not
++ * the current allocation block.
++ */
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("Partially written block %d detected" TENDSTR),
++ blk));
++ }
++ }
++ }
++
++ dev->n_free_chunks++;
++
++ } else if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
++ T(YAFFS_TRACE_SCAN,
++ (TSTR(" Unfixed ECC in chunk(%d:%d), chunk ignored"TENDSTR),
++ blk, c));
++
++ dev->n_free_chunks++;
++
++ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
++ tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
++ (tags.chunk_id > 0 && tags.n_bytes > dev->data_bytes_per_chunk) ||
++ tags.seq_number != bi->seq_number ) {
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored"TENDSTR),
++ blk, c,tags.obj_id, tags.chunk_id, tags.n_bytes));
++
++ dev->n_free_chunks++;
++
++ } else if (tags.chunk_id > 0) {
++ /* chunk_id > 0 so it is a data chunk... */
++ unsigned int endpos;
++ __u32 chunkBase =
++ (tags.chunk_id - 1) * dev->data_bytes_per_chunk;
++
++ foundChunksInBlock = 1;
++
++
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ in = yaffs_find_or_create_by_number(dev,
++ tags.
++ obj_id,
++ YAFFS_OBJECT_TYPE_FILE);
++ if (!in) {
++ /* Out of memory */
++ alloc_failed = 1;
++ }
++
++ if (in &&
++ in->variant_type == YAFFS_OBJECT_TYPE_FILE
++ && chunkBase < in->variant.file_variant.shrink_size) {
++ /* This has not been invalidated by a resize */
++ if (!yaffs_put_chunk_in_file(in, tags.chunk_id, chunk, -1)) {
++ alloc_failed = 1;
++ }
++
++ /* File size is calculated by looking at the data chunks if we have not
++ * seen an object header yet. Stop this practice once we find an object header.
++ */
++ endpos = chunkBase + tags.n_bytes;
++
++ if (!in->valid && /* have not got an object header yet */
++ in->variant.file_variant.scanned_size < endpos) {
++ in->variant.file_variant.scanned_size = endpos;
++ in->variant.file_variant.file_size = endpos;
++ }
++
++ } else if (in) {
++ /* This chunk has been invalidated by a resize or a past file deletion,
++ * so delete the chunk */
++ yaffs_chunk_del(dev, chunk, 1, __LINE__);
++
++ }
++ } else {
++ /* chunk_id == 0, so it is an ObjectHeader.
++ * Thus, we read in the object header and make the object
++ */
++ foundChunksInBlock = 1;
++
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ oh = NULL;
++ in = NULL;
++
++ if (tags.extra_available) {
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id,
++ tags.extra_obj_type);
++ if (!in)
++ alloc_failed = 1;
++ }
++
++ if (!in ||
++ (!in->valid && dev->param.disable_lazy_load) ||
++ tags.extra_shadows ||
++ (!in->valid &&
++ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
++
++ /* If we don't have valid info then we need to read the chunk
++ * TODO In future we can probably defer reading the chunk and
++ * live with invalid data until needed.
++ */
++
++ result = yaffs_rd_chunk_tags_nand(dev,
++ chunk,
++ chunkData,
++ NULL);
++
++ oh = (yaffs_obj_header *) chunkData;
++
++ if (dev->param.inband_tags) {
++ /* Fix up the header if they got corrupted by inband tags */
++ oh->shadows_obj = oh->inband_shadowed_obj_id;
++ oh->is_shrink = oh->inband_is_shrink;
++ }
++
++ if (!in) {
++ in = yaffs_find_or_create_by_number(dev, tags.obj_id, oh->type);
++ if (!in)
++ alloc_failed = 1;
++ }
++
++ }
++
++ if (!in) {
++ /* TODO Hoosterman we have a problem! */
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("yaffs tragedy: Could not make object for object %d at chunk %d during scan"
++ TENDSTR), tags.obj_id, chunk));
++ continue;
++ }
++
++ if (in->valid) {
++ /* We have already filled this one.
++ * We have a duplicate that will be discarded, but
++ * we first have to suck out resize info if it is a file.
++ */
++
++ if ((in->variant_type == YAFFS_OBJECT_TYPE_FILE) &&
++ ((oh &&
++ oh->type == YAFFS_OBJECT_TYPE_FILE) ||
++ (tags.extra_available &&
++ tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE))) {
++ __u32 thisSize =
++ (oh) ? oh->file_size : tags.
++ extra_length;
++ __u32 parent_obj_id =
++ (oh) ? oh->
++ parent_obj_id : tags.
++ extra_parent_id;
++
++
++ is_shrink =
++ (oh) ? oh->is_shrink : tags.
++ extra_is_shrink;
++
++ /* If it is deleted (unlinked at start also means deleted)
++ * we treat the file size as being zeroed at this point.
++ */
++ if (parent_obj_id ==
++ YAFFS_OBJECTID_DELETED
++ || parent_obj_id ==
++ YAFFS_OBJECTID_UNLINKED) {
++ thisSize = 0;
++ is_shrink = 1;
++ }
++
++ if (is_shrink && in->variant.file_variant.shrink_size > thisSize)
++ in->variant.file_variant.shrink_size = thisSize;
++
++ if (is_shrink)
++ bi->has_shrink_hdr = 1;
++
++ }
++ /* Use existing - destroy this one. */
++ yaffs_chunk_del(dev, chunk, 1, __LINE__);
++
++ }
++
++ if (!in->valid && in->variant_type !=
++ (oh ? oh->type : tags.extra_obj_type))
++ T(YAFFS_TRACE_ERROR, (
++ TSTR("yaffs tragedy: Bad object type, "
++ TCONT("%d != %d, for object %d at chunk ")
++ TCONT("%d during scan")
++ TENDSTR), oh ?
++ oh->type : tags.extra_obj_type,
++ in->variant_type, tags.obj_id,
++ chunk));
++
++ if (!in->valid &&
++ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id ==
++ YAFFS_OBJECTID_LOSTNFOUND)) {
++ /* We only load some info, don't fiddle with directory structure */
++ in->valid = 1;
++
++ if (oh) {
++
++ in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++ in->win_atime[0] = oh->win_atime[0];
++ in->win_ctime[0] = oh->win_ctime[0];
++ in->win_mtime[0] = oh->win_mtime[0];
++ in->win_atime[1] = oh->win_atime[1];
++ in->win_ctime[1] = oh->win_ctime[1];
++ in->win_mtime[1] = oh->win_mtime[1];
++#else
++ in->yst_uid = oh->yst_uid;
++ in->yst_gid = oh->yst_gid;
++ in->yst_atime = oh->yst_atime;
++ in->yst_mtime = oh->yst_mtime;
++ in->yst_ctime = oh->yst_ctime;
++ in->yst_rdev = oh->yst_rdev;
++
++ in->lazy_loaded = 0;
++
++#endif
++ } else
++ in->lazy_loaded = 1;
++
++ in->hdr_chunk = chunk;
++
++ } else if (!in->valid) {
++ /* we need to load this info */
++
++ in->valid = 1;
++ in->hdr_chunk = chunk;
++
++ if (oh) {
++ in->variant_type = oh->type;
++
++ in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++ in->win_atime[0] = oh->win_atime[0];
++ in->win_ctime[0] = oh->win_ctime[0];
++ in->win_mtime[0] = oh->win_mtime[0];
++ in->win_atime[1] = oh->win_atime[1];
++ in->win_ctime[1] = oh->win_ctime[1];
++ in->win_mtime[1] = oh->win_mtime[1];
++#else
++ in->yst_uid = oh->yst_uid;
++ in->yst_gid = oh->yst_gid;
++ in->yst_atime = oh->yst_atime;
++ in->yst_mtime = oh->yst_mtime;
++ in->yst_ctime = oh->yst_ctime;
++ in->yst_rdev = oh->yst_rdev;
++#endif
++
++ if (oh->shadows_obj > 0)
++ yaffs_handle_shadowed_obj(dev,
++ oh->
++ shadows_obj,
++ 1);
++
++
++
++ yaffs_set_obj_name_from_oh(in, oh);
++ parent =
++ yaffs_find_or_create_by_number
++ (dev, oh->parent_obj_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++
++ file_size = oh->file_size;
++ is_shrink = oh->is_shrink;
++ equiv_id = oh->equiv_id;
++
++ } else {
++ in->variant_type = tags.extra_obj_type;
++ parent =
++ yaffs_find_or_create_by_number
++ (dev, tags.extra_parent_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ file_size = tags.extra_length;
++ is_shrink = tags.extra_is_shrink;
++ equiv_id = tags.extra_equiv_id;
++ in->lazy_loaded = 1;
++
++ }
++ in->dirty = 0;
++
++ if (!parent)
++ alloc_failed = 1;
++
++ /* directory stuff...
++ * hook up to parent
++ */
++
++ if (parent && parent->variant_type ==
++ YAFFS_OBJECT_TYPE_UNKNOWN) {
++ /* Set up as a directory */
++ parent->variant_type =
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ YINIT_LIST_HEAD(&parent->variant.
++ dir_variant.
++ children);
++ } else if (!parent || parent->variant_type !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
++ /* Hoosterman, another problem....
++ * We're trying to use a non-directory as a directory
++ */
++
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++ TENDSTR)));
++ parent = dev->lost_n_found;
++ }
++
++ yaffs_add_obj_to_dir(parent, in);
++
++ itsUnlinked = (parent == dev->del_dir) ||
++ (parent == dev->unlinked_dir);
++
++ if (is_shrink) {
++ /* Mark the block as having a shrinkHeader */
++ bi->has_shrink_hdr = 1;
++ }
++
++ /* Note re hardlinks.
++ * Since we might scan a hardlink before its equivalent object is scanned
++ * we put them all in a list.
++ * After scanning is complete, we should have all the objects, so we run
++ * through this list and fix up all the chains.
++ */
++
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* Todo got a problem */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++
++ if (in->variant.file_variant.
++ scanned_size < file_size) {
++ /* This covers the case where the file size is greater
++ * than where the data is
++ * This will happen if the file is resized to be larger
++ * than its current data extents.
++ */
++ in->variant.file_variant.file_size = file_size;
++ in->variant.file_variant.scanned_size = file_size;
++ }
++
++ if (in->variant.file_variant.shrink_size > file_size)
++ in->variant.file_variant.shrink_size = file_size;
++
++
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ if (!itsUnlinked) {
++ in->variant.hardlink_variant.equiv_id =
++ equiv_id;
++ in->hard_links.next =
++ (struct ylist_head *) hard_list;
++ hard_list = in;
++ }
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ if (oh) {
++ in->variant.symlink_variant.alias =
++ yaffs_clone_str(oh->alias);
++ if (!in->variant.symlink_variant.alias)
++ alloc_failed = 1;
++ }
++ break;
++ }
++
++ }
++
++ }
++
++ } /* End of scanning for each chunk */
++
++ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++ /* If we got this far while scanning, then the block is fully allocated. */
++ state = YAFFS_BLOCK_STATE_FULL;
++ }
++
++
++ bi->block_state = state;
++
++ /* Now let's see if it was dirty */
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
++ yaffs_block_became_dirty(dev, blk);
++ }
++
++ }
++
++ yaffs_skip_rest_of_block(dev);
++
++ if (altBlockIndex)
++ YFREE_ALT(blockIndex);
++ else
++ YFREE(blockIndex);
++
++ /* Ok, we've done all the scanning.
++ * Fix up the hard link chains.
++ * We should now have scanned all the objects, so it's time to add these
++ * hardlinks.
++ */
++ yaffs_link_fixup(dev, hard_list);
++
++
++ yaffs_release_temp_buffer(dev, chunkData, __LINE__);
++
++ if (alloc_failed)
++ return YAFFS_FAIL;
++
++ T(YAFFS_TRACE_SCAN, (TSTR("yaffs2_scan_backwards ends" TENDSTR)));
++
++ return YAFFS_OK;
++}
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs2.h
+@@ -0,0 +1,36 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __YAFFS_YAFFS2_H__
++#define __YAFFS_YAFFS2_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_calc_oldest_dirty_seq(yaffs_dev_t *dev);
++void yaffs2_find_oldest_dirty_seq(yaffs_dev_t *dev);
++void yaffs2_clear_oldest_dirty_seq(yaffs_dev_t *dev, yaffs_block_info_t *bi);
++void yaffs2_update_oldest_dirty_seq(yaffs_dev_t *dev, unsigned block_no, yaffs_block_info_t *bi);
++int yaffs_block_ok_for_gc(yaffs_dev_t *dev, yaffs_block_info_t *bi);
++__u32 yaffs2_find_refresh_block(yaffs_dev_t *dev);
++int yaffs2_checkpt_required(yaffs_dev_t *dev);
++int yaffs_calc_checkpt_blocks_required(yaffs_dev_t *dev);
++
++
++void yaffs2_checkpt_invalidate(yaffs_dev_t *dev);
++int yaffs2_checkpt_save(yaffs_dev_t *dev);
++int yaffs2_checkpt_restore(yaffs_dev_t *dev);
++
++int yaffs2_handle_hole(yaffs_obj_t *obj, loff_t new_size);
++int yaffs2_scan_backwards(yaffs_dev_t *dev);
++
++#endif
+--- a/fs/yaffs2/yportenv.h
++++ b/fs/yaffs2/yportenv.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -41,12 +41,14 @@
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+ #include <linux/config.h>
+ #endif
++
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
++#include <linux/xattr.h>
+
+ #define YCHAR char
+ #define YUCHAR unsigned char
+@@ -55,11 +57,11 @@
+ #define yaffs_strcpy(a, b) strcpy(a, b)
+ #define yaffs_strncpy(a, b, c) strncpy(a, b, c)
+ #define yaffs_strncmp(a, b, c) strncmp(a, b, c)
+-#define yaffs_strlen(s) strlen(s)
++#define yaffs_strnlen(s,m) strnlen(s,m)
+ #define yaffs_sprintf sprintf
+ #define yaffs_toupper(a) toupper(a)
+
+-#define Y_INLINE inline
++#define Y_INLINE __inline__
+
+ #define YAFFS_LOSTNFOUND_NAME "lost+found"
+ #define YAFFS_LOSTNFOUND_PREFIX "obj"
+@@ -71,11 +73,11 @@
+ #define YFREE_ALT(x) vfree(x)
+ #define YMALLOC_DMA(x) YMALLOC(x)
+
+-/* KR - added for use in scan so processes aren't blocked indefinitely. */
+ #define YYIELD() schedule()
++#define Y_DUMP_STACK() dump_stack()
+
+-#define YAFFS_ROOT_MODE 0666
+-#define YAFFS_LOSTNFOUND_MODE 0666
++#define YAFFS_ROOT_MODE 0755
++#define YAFFS_LOSTNFOUND_MODE 0700
+
+ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ #define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+@@ -85,19 +87,14 @@
+ #define Y_TIME_CONVERT(x) (x)
+ #endif
+
+-#define yaffs_SumCompare(x, y) ((x) == (y))
++#define yaffs_sum_cmp(x, y) ((x) == (y))
+ #define yaffs_strcmp(a, b) strcmp(a, b)
+
+ #define TENDSTR "\n"
+-#define TSTR(x) KERN_WARNING x
++#define TSTR(x) KERN_DEBUG x
+ #define TCONT(x) x
+ #define TOUT(p) printk p
+
+-#define yaffs_trace(mask, fmt, args...) \
+- do { if ((mask) & (yaffs_traceMask|YAFFS_TRACE_ERROR)) \
+- printk(KERN_WARNING "yaffs: " fmt, ## args); \
+- } while (0)
+-
+ #define compile_time_assertion(assertion) \
+ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
+
+@@ -116,7 +113,6 @@
+ #include "stdio.h"
+ #include "string.h"
+
+-#include "devextras.h"
+
+ #define YMALLOC(x) malloc(x)
+ #define YFREE(x) free(x)
+@@ -129,7 +125,7 @@
+ #define yaffs_strcat(a, b) strcat(a, b)
+ #define yaffs_strcpy(a, b) strcpy(a, b)
+ #define yaffs_strncpy(a, b, c) strncpy(a, b, c)
+-#define yaffs_strlen(s) strlen(s)
++#define yaffs_strnlen(s,m) strnlen(s,m)
+ #define yaffs_sprintf sprintf
+ #define yaffs_toupper(a) toupper(a)
+
+@@ -146,10 +142,10 @@
+ #define YAFFS_LOSTNFOUND_PREFIX "obj"
+ /* #define YPRINTF(x) printf x */
+
+-#define YAFFS_ROOT_MODE 0666
+-#define YAFFS_LOSTNFOUND_MODE 0666
++#define YAFFS_ROOT_MODE 0755
++#define YAFFS_LOSTNFOUND_MODE 0700
+
+-#define yaffs_SumCompare(x, y) ((x) == (y))
++#define yaffs_sum_cmp(x, y) ((x) == (y))
+ #define yaffs_strcmp(a, b) strcmp(a, b)
+
+ #else
+@@ -158,46 +154,180 @@
+
+ #endif
+
+-/* see yaffs_fs.c */
+-extern unsigned int yaffs_traceMask;
+-extern unsigned int yaffs_wr_attempts;
++#if defined(CONFIG_YAFFS_DIRECT) || defined(CONFIG_YAFFS_WINCE)
+
+-/*
+- * Tracing flags.
+- * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+- */
++#ifdef CONFIG_YAFFSFS_PROVIDE_VALUES
++
++#ifndef O_RDONLY
++#define O_RDONLY 00
++#endif
++
++#ifndef O_WRONLY
++#define O_WRONLY 01
++#endif
++
++#ifndef O_RDWR
++#define O_RDWR 02
++#endif
++
++#ifndef O_CREAT
++#define O_CREAT 0100
++#endif
++
++#ifndef O_EXCL
++#define O_EXCL 0200
++#endif
++
++#ifndef O_TRUNC
++#define O_TRUNC 01000
++#endif
++
++#ifndef O_APPEND
++#define O_APPEND 02000
++#endif
++
++#ifndef SEEK_SET
++#define SEEK_SET 0
++#endif
++
++#ifndef SEEK_CUR
++#define SEEK_CUR 1
++#endif
++
++#ifndef SEEK_END
++#define SEEK_END 2
++#endif
++
++#ifndef EBUSY
++#define EBUSY 16
++#endif
++
++#ifndef ENODEV
++#define ENODEV 19
++#endif
++
++#ifndef EINVAL
++#define EINVAL 22
++#endif
++
++#ifndef EBADF
++#define EBADF 9
++#endif
++
++#ifndef EACCES
++#define EACCES 13
++#endif
++
++#ifndef EXDEV
++#define EXDEV 18
++#endif
++
++#ifndef ENOENT
++#define ENOENT 2
++#endif
++
++#ifndef ENOSPC
++#define ENOSPC 28
++#endif
++
++#ifndef ERANGE
++#define ERANGE 34
++#endif
++
++#ifndef ENODATA
++#define ENODATA 61
++#endif
++
++#ifndef ENOTEMPTY
++#define ENOTEMPTY 39
++#endif
++
++#ifndef ENAMETOOLONG
++#define ENAMETOOLONG 36
++#endif
++
++#ifndef ENOMEM
++#define ENOMEM 12
++#endif
++
++#ifndef EEXIST
++#define EEXIST 17
++#endif
++
++#ifndef ENOTDIR
++#define ENOTDIR 20
++#endif
++
++#ifndef EISDIR
++#define EISDIR 21
++#endif
++
++
++// Mode flags
++
++#ifndef S_IFMT
++#define S_IFMT 0170000
++#endif
++
++#ifndef S_IFLNK
++#define S_IFLNK 0120000
++#endif
+
+-#define YAFFS_TRACE_OS 0x00000002
+-#define YAFFS_TRACE_ALLOCATE 0x00000004
+-#define YAFFS_TRACE_SCAN 0x00000008
+-#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
+-#define YAFFS_TRACE_ERASE 0x00000020
+-#define YAFFS_TRACE_GC 0x00000040
+-#define YAFFS_TRACE_WRITE 0x00000080
+-#define YAFFS_TRACE_TRACING 0x00000100
+-#define YAFFS_TRACE_DELETION 0x00000200
+-#define YAFFS_TRACE_BUFFERS 0x00000400
+-#define YAFFS_TRACE_NANDACCESS 0x00000800
+-#define YAFFS_TRACE_GC_DETAIL 0x00001000
+-#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
+-#define YAFFS_TRACE_MTD 0x00004000
+-#define YAFFS_TRACE_CHECKPOINT 0x00008000
+-
+-#define YAFFS_TRACE_VERIFY 0x00010000
+-#define YAFFS_TRACE_VERIFY_NAND 0x00020000
+-#define YAFFS_TRACE_VERIFY_FULL 0x00040000
+-#define YAFFS_TRACE_VERIFY_ALL 0x000F0000
+-
+-
+-#define YAFFS_TRACE_ERROR 0x40000000
+-#define YAFFS_TRACE_BUG 0x80000000
+-#define YAFFS_TRACE_ALWAYS 0xF0000000
++#ifndef S_IFDIR
++#define S_IFDIR 0040000
++#endif
++
++#ifndef S_IFREG
++#define S_IFREG 0100000
++#endif
+
++#ifndef S_IREAD
++#define S_IREAD 0000400
++#endif
++
++#ifndef S_IWRITE
++#define S_IWRITE 0000200
++#endif
+
+-#define T(mask, p) do { if ((mask) & (yaffs_traceMask | YAFFS_TRACE_ALWAYS)) TOUT(p); } while (0)
++#ifndef S_IEXEC
++#define S_IEXEC 0000100
++#endif
++
++#ifndef XATTR_CREATE
++#define XATTR_CREATE 1
++#endif
++
++#ifndef XATTR_REPLACE
++#define XATTR_REPLACE 2
++#endif
++
++#ifndef R_OK
++#define R_OK 4
++#define W_OK 2
++#define X_OK 1
++#define F_OK 0
++#endif
++
++#else
++#include <errno.h>
++#include <sys/stat.h>
++#include <fcntl.h>
++#endif
++
++#endif
++
++#ifndef Y_DUMP_STACK
++#define Y_DUMP_STACK() do { } while (0)
++#endif
+
+ #ifndef YBUG
+-#define YBUG() do {T(YAFFS_TRACE_BUG, (TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR), __LINE__)); } while (0)
++#define YBUG() do {\
++ T(YAFFS_TRACE_BUG,\
++ (TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR),\
++ __LINE__));\
++ Y_DUMP_STACK();\
++} while (0)
+ #endif
+
++
+ #endif
diff --git a/target/linux/generic/patches-3.3/503-yaffs_symlink_bug.patch b/target/linux/generic/patches-3.3/503-yaffs_symlink_bug.patch
new file mode 100644
index 0000000..dabf287
--- /dev/null
+++ b/target/linux/generic/patches-3.3/503-yaffs_symlink_bug.patch
@@ -0,0 +1,17 @@
+--- a/fs/yaffs2/yaffs_guts.c
++++ b/fs/yaffs2/yaffs_guts.c
+@@ -1709,11 +1709,11 @@ static int yaffs_change_obj_name(yaffs_o
+ }
+
+ /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
+- if (obj->my_dev->param.is_yaffs2)
++ // if (obj->my_dev->param.is_yaffs2)
+ unlinkOp = (new_dir == obj->my_dev->unlinked_dir);
+- else
++ /* else
+ unlinkOp = (new_dir == obj->my_dev->unlinked_dir
+- && obj->variant_type == YAFFS_OBJECT_TYPE_FILE);
++ && obj->variant_type == YAFFS_OBJECT_TYPE_FILE); */
+
+ deleteOp = (new_dir == obj->my_dev->del_dir);
+
diff --git a/target/linux/generic/patches-3.3/504-yaffs_mutex_fix.patch b/target/linux/generic/patches-3.3/504-yaffs_mutex_fix.patch
new file mode 100644
index 0000000..b34b12f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/504-yaffs_mutex_fix.patch
@@ -0,0 +1,20 @@
+--- a/fs/yaffs2/yaffs_vfs_glue.c
++++ b/fs/yaffs2/yaffs_vfs_glue.c
+@@ -3036,7 +3036,7 @@ static struct super_block *yaffs_interna
+ YINIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->searchContexts));
+ param->remove_obj_fn = yaffs_remove_obj_callback;
+
+- init_MUTEX(&(yaffs_dev_to_lc(dev)->grossLock));
++ sema_init(&(yaffs_dev_to_lc(dev)->grossLock), 1);
+
+ yaffs_gross_lock(dev);
+
+@@ -3494,7 +3494,7 @@ static int __init init_yaffs_fs(void)
+
+
+
+- init_MUTEX(&yaffs_context_lock);
++ sema_init((&yaffs_context_lock), 1);
+
+ /* Install the proc_fs entries */
+ my_proc_entry = create_proc_entry("yaffs",
diff --git a/target/linux/generic/patches-3.3/505-2.6.39_fix.patch b/target/linux/generic/patches-3.3/505-2.6.39_fix.patch
new file mode 100644
index 0000000..5108f7b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/505-2.6.39_fix.patch
@@ -0,0 +1,147 @@
+--- a/fs/yaffs2/yaffs_vfs_glue.c
++++ b/fs/yaffs2/yaffs_vfs_glue.c
+@@ -72,7 +72,7 @@
+ #include <linux/init.h>
+ #include <linux/fs.h>
+ #include <linux/proc_fs.h>
+-#include <linux/smp_lock.h>
++#include <linux/mutex.h>
+ #include <linux/pagemap.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/interrupt.h>
+@@ -97,6 +97,8 @@
+
+ #include <asm/div64.h>
+
++static DEFINE_MUTEX(yaffs_mutex);
++
+ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+ #include <linux/statfs.h>
+@@ -1538,7 +1540,7 @@ static loff_t yaffs_dir_llseek(struct fi
+ {
+ long long retval;
+
+- lock_kernel();
++ mutex_lock(&yaffs_mutex);
+
+ switch (origin){
+ case 2:
+@@ -1555,7 +1557,7 @@ static loff_t yaffs_dir_llseek(struct fi
+
+ retval = offset;
+ }
+- unlock_kernel();
++ mutex_unlock(&yaffs_mutex);
+ return retval;
+ }
+
+@@ -3087,98 +3089,52 @@ static struct super_block *yaffs_interna
+ return sb;
+ }
+
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+ {
+ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_read_super(struct file_system_type *fs,
++static struct dentry *yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+- void *data, struct vfsmount *mnt)
+-{
+-
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs_internal_read_super_mtd, mnt);
+-}
+-#else
+-static struct super_block *yaffs_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name,
+- void *data)
++ void *data)
+ {
+
+- return get_sb_bdev(fs, flags, dev_name, data,
++ return mount_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd);
+ }
+-#endif
+
+ static struct file_system_type yaffs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs",
+- .get_sb = yaffs_read_super,
++ .mount = yaffs_read_super,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
+-#else
+-static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
+- int silent)
+-{
+- return yaffs_internal_read_super(1, sb, data, silent);
+-}
+-
+-static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
+- FS_REQUIRES_DEV);
+-#endif
+-
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+ {
+ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs2_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name, void *data,
+- struct vfsmount *mnt)
++static struct dentry *yaffs2_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data)
+ {
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs2_internal_read_super_mtd, mnt);
++ return mount_bdev(fs, flags, dev_name, data,
++ yaffs_internal_read_super_mtd);
+ }
+-#else
+-static struct super_block *yaffs2_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name,
+- void *data)
+-{
+-
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs2_internal_read_super_mtd);
+-}
+-#endif
+
+ static struct file_system_type yaffs2_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs2",
+- .get_sb = yaffs2_read_super,
++ .mount = yaffs2_read_super,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
+-#else
+-static struct super_block *yaffs2_read_super(struct super_block *sb,
+- void *data, int silent)
+-{
+- return yaffs_internal_read_super(2, sb, data, silent);
+-}
+-
+-static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
+- FS_REQUIRES_DEV);
+-#endif
+
+ #endif /* CONFIG_YAFFS_YAFFS2 */
+
diff --git a/target/linux/generic/patches-3.3/510-jffs2_make_lzma_available.patch b/target/linux/generic/patches-3.3/510-jffs2_make_lzma_available.patch
new file mode 100644
index 0000000..c51be07
--- /dev/null
+++ b/target/linux/generic/patches-3.3/510-jffs2_make_lzma_available.patch
@@ -0,0 +1,5142 @@
+--- a/fs/jffs2/Kconfig
++++ b/fs/jffs2/Kconfig
+@@ -139,6 +139,15 @@ config JFFS2_LZO
+ This feature was added in July, 2007. Say 'N' if you need
+ compatibility with older bootloaders or kernels.
+
++config JFFS2_LZMA
++ bool "JFFS2 LZMA compression support" if JFFS2_COMPRESSION_OPTIONS
++ select LZMA_COMPRESS
++ select LZMA_DECOMPRESS
++ depends on JFFS2_FS
++ default n
++ help
++ JFFS2 wrapper to the LZMA C SDK
++
+ config JFFS2_RTIME
+ bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS
+ depends on JFFS2_FS
+--- a/fs/jffs2/Makefile
++++ b/fs/jffs2/Makefile
+@@ -18,4 +18,7 @@ jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rub
+ jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
+ jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
+ jffs2-$(CONFIG_JFFS2_LZO) += compr_lzo.o
++jffs2-$(CONFIG_JFFS2_LZMA) += compr_lzma.o
+ jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o
++
++CFLAGS_compr_lzma.o += -Iinclude/linux -Ilib/lzma
+--- a/fs/jffs2/compr.c
++++ b/fs/jffs2/compr.c
+@@ -374,6 +374,9 @@ int __init jffs2_compressors_init(void)
+ #ifdef CONFIG_JFFS2_LZO
+ jffs2_lzo_init();
+ #endif
++#ifdef CONFIG_JFFS2_LZMA
++ jffs2_lzma_init();
++#endif
+ /* Setting default compression mode */
+ #ifdef CONFIG_JFFS2_CMODE_NONE
+ jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
+@@ -397,6 +400,9 @@ int __init jffs2_compressors_init(void)
+ int jffs2_compressors_exit(void)
+ {
+ /* Unregistering compressors */
++#ifdef CONFIG_JFFS2_LZMA
++ jffs2_lzma_exit();
++#endif
+ #ifdef CONFIG_JFFS2_LZO
+ jffs2_lzo_exit();
+ #endif
+--- a/fs/jffs2/compr.h
++++ b/fs/jffs2/compr.h
+@@ -29,9 +29,9 @@
+ #define JFFS2_DYNRUBIN_PRIORITY 20
+ #define JFFS2_LZARI_PRIORITY 30
+ #define JFFS2_RTIME_PRIORITY 50
+-#define JFFS2_ZLIB_PRIORITY 60
+-#define JFFS2_LZO_PRIORITY 80
+-
++#define JFFS2_LZMA_PRIORITY 70
++#define JFFS2_ZLIB_PRIORITY 80
++#define JFFS2_LZO_PRIORITY 90
+
+ #define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
+ #define JFFS2_DYNRUBIN_DISABLED /* for decompression */
+@@ -101,5 +101,9 @@ void jffs2_zlib_exit(void);
+ int jffs2_lzo_init(void);
+ void jffs2_lzo_exit(void);
+ #endif
++#ifdef CONFIG_JFFS2_LZMA
++int jffs2_lzma_init(void);
++void jffs2_lzma_exit(void);
++#endif
+
+ #endif /* __JFFS2_COMPR_H__ */
+--- /dev/null
++++ b/fs/jffs2/compr_lzma.c
+@@ -0,0 +1,128 @@
++/*
++ * JFFS2 -- Journalling Flash File System, Version 2.
++ *
++ * For licensing information, see the file 'LICENCE' in this directory.
++ *
++ * JFFS2 wrapper to the LZMA C SDK
++ *
++ */
++
++#include <linux/lzma.h>
++#include "compr.h"
++
++#ifdef __KERNEL__
++ static DEFINE_MUTEX(deflate_mutex);
++#endif
++
++CLzmaEncHandle *p;
++Byte propsEncoded[LZMA_PROPS_SIZE];
++SizeT propsSize = sizeof(propsEncoded);
++
++STATIC void lzma_free_workspace(void)
++{
++ LzmaEnc_Destroy(p, &lzma_alloc, &lzma_alloc);
++}
++
++STATIC int INIT lzma_alloc_workspace(CLzmaEncProps *props)
++{
++ if ((p = (CLzmaEncHandle *)LzmaEnc_Create(&lzma_alloc)) == NULL)
++ {
++ PRINT_ERROR("Failed to allocate lzma deflate workspace\n");
++ return -ENOMEM;
++ }
++
++ if (LzmaEnc_SetProps(p, props) != SZ_OK)
++ {
++ lzma_free_workspace();
++ return -1;
++ }
++
++ if (LzmaEnc_WriteProperties(p, propsEncoded, &propsSize) != SZ_OK)
++ {
++ lzma_free_workspace();
++ return -1;
++ }
++
++ return 0;
++}
++
++STATIC int jffs2_lzma_compress(unsigned char *data_in, unsigned char *cpage_out,
++ uint32_t *sourcelen, uint32_t *dstlen)
++{
++ SizeT compress_size = (SizeT)(*dstlen);
++ int ret;
++
++ #ifdef __KERNEL__
++ mutex_lock(&deflate_mutex);
++ #endif
++
++ ret = LzmaEnc_MemEncode(p, cpage_out, &compress_size, data_in, *sourcelen,
++ 0, NULL, &lzma_alloc, &lzma_alloc);
++
++ #ifdef __KERNEL__
++ mutex_unlock(&deflate_mutex);
++ #endif
++
++ if (ret != SZ_OK)
++ return -1;
++
++ *dstlen = (uint32_t)compress_size;
++
++ return 0;
++}
++
++STATIC int jffs2_lzma_decompress(unsigned char *data_in, unsigned char *cpage_out,
++ uint32_t srclen, uint32_t destlen)
++{
++ int ret;
++ SizeT dl = (SizeT)destlen;
++ SizeT sl = (SizeT)srclen;
++ ELzmaStatus status;
++
++ ret = LzmaDecode(cpage_out, &dl, data_in, &sl, propsEncoded,
++ propsSize, LZMA_FINISH_ANY, &status, &lzma_alloc);
++
++ if (ret != SZ_OK || status == LZMA_STATUS_NOT_FINISHED || dl != (SizeT)destlen)
++ return -1;
++
++ return 0;
++}
++
++static struct jffs2_compressor jffs2_lzma_comp = {
++ .priority = JFFS2_LZMA_PRIORITY,
++ .name = "lzma",
++ .compr = JFFS2_COMPR_LZMA,
++ .compress = &jffs2_lzma_compress,
++ .decompress = &jffs2_lzma_decompress,
++ .disabled = 0,
++};
++
++int INIT jffs2_lzma_init(void)
++{
++ int ret;
++ CLzmaEncProps props;
++ LzmaEncProps_Init(&props);
++
++ props.dictSize = LZMA_BEST_DICT(0x2000);
++ props.level = LZMA_BEST_LEVEL;
++ props.lc = LZMA_BEST_LC;
++ props.lp = LZMA_BEST_LP;
++ props.pb = LZMA_BEST_PB;
++ props.fb = LZMA_BEST_FB;
++
++ ret = lzma_alloc_workspace(&props);
++ if (ret < 0)
++ return ret;
++
++ ret = jffs2_register_compressor(&jffs2_lzma_comp);
++ if (ret)
++ lzma_free_workspace();
++
++ return ret;
++}
++
++void jffs2_lzma_exit(void)
++{
++ jffs2_unregister_compressor(&jffs2_lzma_comp);
++ lzma_free_workspace();
++}
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -371,14 +371,41 @@ static int __init init_jffs2_fs(void)
+ BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
+ BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
+
+- printk(KERN_INFO "JFFS2 version 2.2."
++ printk(KERN_INFO "JFFS2 version 2.2"
+ #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ " (NAND)"
+ #endif
+ #ifdef CONFIG_JFFS2_SUMMARY
+- " (SUMMARY) "
++ " (SUMMARY)"
+ #endif
+- " © 2001-2006 Red Hat, Inc.\n");
++#ifdef CONFIG_JFFS2_ZLIB
++ " (ZLIB)"
++#endif
++#ifdef CONFIG_JFFS2_LZO
++ " (LZO)"
++#endif
++#ifdef CONFIG_JFFS2_LZMA
++ " (LZMA)"
++#endif
++#ifdef CONFIG_JFFS2_RTIME
++ " (RTIME)"
++#endif
++#ifdef CONFIG_JFFS2_RUBIN
++ " (RUBIN)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_NONE
++ " (CMODE_NONE)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_PRIORITY
++ " (CMODE_PRIORITY)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_SIZE
++ " (CMODE_SIZE)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
++ " (CMODE_FAVOURLZO)"
++#endif
++ " (c) 2001-2006 Red Hat, Inc.\n");
+
+ jffs2_inode_cachep = kmem_cache_create("jffs2_i",
+ sizeof(struct jffs2_inode_info),
+--- a/include/linux/jffs2.h
++++ b/include/linux/jffs2.h
+@@ -46,6 +46,7 @@
+ #define JFFS2_COMPR_DYNRUBIN 0x05
+ #define JFFS2_COMPR_ZLIB 0x06
+ #define JFFS2_COMPR_LZO 0x07
++#define JFFS2_COMPR_LZMA 0x08
+ /* Compatibility flags. */
+ #define JFFS2_COMPAT_MASK 0xc000 /* What do to if an unknown nodetype is found */
+ #define JFFS2_NODE_ACCURATE 0x2000
+--- /dev/null
++++ b/include/linux/lzma.h
+@@ -0,0 +1,62 @@
++#ifndef __LZMA_H__
++#define __LZMA_H__
++
++#ifdef __KERNEL__
++ #include <linux/kernel.h>
++ #include <linux/sched.h>
++ #include <linux/slab.h>
++ #include <linux/vmalloc.h>
++ #include <linux/init.h>
++ #define LZMA_MALLOC vmalloc
++ #define LZMA_FREE vfree
++ #define PRINT_ERROR(msg) printk(KERN_WARNING #msg)
++ #define INIT __init
++ #define STATIC static
++#else
++ #include <stdint.h>
++ #include <stdlib.h>
++ #include <stdio.h>
++ #include <unistd.h>
++ #include <string.h>
++ #include <asm/types.h>
++ #include <errno.h>
++ #include <linux/jffs2.h>
++ #ifndef PAGE_SIZE
++ extern int page_size;
++ #define PAGE_SIZE page_size
++ #endif
++ #define LZMA_MALLOC malloc
++ #define LZMA_FREE free
++ #define PRINT_ERROR(msg) fprintf(stderr, msg)
++ #define INIT
++ #define STATIC
++#endif
++
++#include "lzma/LzmaDec.h"
++#include "lzma/LzmaEnc.h"
++
++#define LZMA_BEST_LEVEL (9)
++#define LZMA_BEST_LC (0)
++#define LZMA_BEST_LP (0)
++#define LZMA_BEST_PB (0)
++#define LZMA_BEST_FB (273)
++
++#define LZMA_BEST_DICT(n) (((int)((n) / 2)) * 2)
++
++static void *p_lzma_malloc(void *p, size_t size)
++{
++ if (size == 0)
++ return NULL;
++
++ return LZMA_MALLOC(size);
++}
++
++static void p_lzma_free(void *p, void *address)
++{
++ if (address != NULL)
++ LZMA_FREE(address);
++}
++
++static ISzAlloc lzma_alloc = {p_lzma_malloc, p_lzma_free};
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzFind.h
+@@ -0,0 +1,115 @@
++/* LzFind.h -- Match finder for LZ algorithms
++2009-04-22 : Igor Pavlov : Public domain */
++
++#ifndef __LZ_FIND_H
++#define __LZ_FIND_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++typedef UInt32 CLzRef;
++
++typedef struct _CMatchFinder
++{
++ Byte *buffer;
++ UInt32 pos;
++ UInt32 posLimit;
++ UInt32 streamPos;
++ UInt32 lenLimit;
++
++ UInt32 cyclicBufferPos;
++ UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
++
++ UInt32 matchMaxLen;
++ CLzRef *hash;
++ CLzRef *son;
++ UInt32 hashMask;
++ UInt32 cutValue;
++
++ Byte *bufferBase;
++ ISeqInStream *stream;
++ int streamEndWasReached;
++
++ UInt32 blockSize;
++ UInt32 keepSizeBefore;
++ UInt32 keepSizeAfter;
++
++ UInt32 numHashBytes;
++ int directInput;
++ size_t directInputRem;
++ int btMode;
++ int bigHash;
++ UInt32 historySize;
++ UInt32 fixedHashSize;
++ UInt32 hashSizeSum;
++ UInt32 numSons;
++ SRes result;
++ UInt32 crc[256];
++} CMatchFinder;
++
++#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer)
++#define Inline_MatchFinder_GetIndexByte(p, index) ((p)->buffer[(Int32)(index)])
++
++#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
++
++int MatchFinder_NeedMove(CMatchFinder *p);
++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p);
++void MatchFinder_MoveBlock(CMatchFinder *p);
++void MatchFinder_ReadIfRequired(CMatchFinder *p);
++
++void MatchFinder_Construct(CMatchFinder *p);
++
++/* Conditions:
++ historySize <= 3 GB
++ keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB
++*/
++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
++ ISzAlloc *alloc);
++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc);
++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems);
++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
++
++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
++ UInt32 *distances, UInt32 maxLen);
++
++/*
++Conditions:
++ Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func.
++ Mf_GetPointerToCurrentPos_Func's result must be used only before any other function
++*/
++
++typedef void (*Mf_Init_Func)(void *object);
++typedef Byte (*Mf_GetIndexByte_Func)(void *object, Int32 index);
++typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
++typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
++typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
++typedef void (*Mf_Skip_Func)(void *object, UInt32);
++
++typedef struct _IMatchFinder
++{
++ Mf_Init_Func Init;
++ Mf_GetIndexByte_Func GetIndexByte;
++ Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
++ Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
++ Mf_GetMatches_Func GetMatches;
++ Mf_Skip_Func Skip;
++} IMatchFinder;
++
++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
++
++void MatchFinder_Init(CMatchFinder *p);
++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzHash.h
+@@ -0,0 +1,54 @@
++/* LzHash.h -- HASH functions for LZ algorithms
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZ_HASH_H
++#define __LZ_HASH_H
++
++#define kHash2Size (1 << 10)
++#define kHash3Size (1 << 16)
++#define kHash4Size (1 << 20)
++
++#define kFix3HashSize (kHash2Size)
++#define kFix4HashSize (kHash2Size + kHash3Size)
++#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
++
++#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8);
++
++#define HASH3_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }
++
++#define HASH4_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & p->hashMask; }
++
++#define HASH5_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \
++ hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & p->hashMask; \
++ hash4Value &= (kHash4Size - 1); }
++
++/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */
++#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
++
++
++#define MT_HASH2_CALC \
++ hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1);
++
++#define MT_HASH3_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
++
++#define MT_HASH4_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); }
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzmaDec.h
+@@ -0,0 +1,231 @@
++/* LzmaDec.h -- LZMA Decoder
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZMA_DEC_H
++#define __LZMA_DEC_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* #define _LZMA_PROB32 */
++/* _LZMA_PROB32 can increase the speed on some CPUs,
++ but memory usage for CLzmaDec::probs will be doubled in that case */
++
++#ifdef _LZMA_PROB32
++#define CLzmaProb UInt32
++#else
++#define CLzmaProb UInt16
++#endif
++
++
++/* ---------- LZMA Properties ---------- */
++
++#define LZMA_PROPS_SIZE 5
++
++typedef struct _CLzmaProps
++{
++ unsigned lc, lp, pb;
++ UInt32 dicSize;
++} CLzmaProps;
++
++/* LzmaProps_Decode - decodes properties
++Returns:
++ SZ_OK
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++*/
++
++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size);
++
++
++/* ---------- LZMA Decoder state ---------- */
++
++/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case.
++ Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */
++
++#define LZMA_REQUIRED_INPUT_MAX 20
++
++typedef struct
++{
++ CLzmaProps prop;
++ CLzmaProb *probs;
++ Byte *dic;
++ const Byte *buf;
++ UInt32 range, code;
++ SizeT dicPos;
++ SizeT dicBufSize;
++ UInt32 processedPos;
++ UInt32 checkDicSize;
++ unsigned state;
++ UInt32 reps[4];
++ unsigned remainLen;
++ int needFlush;
++ int needInitState;
++ UInt32 numProbs;
++ unsigned tempBufSize;
++ Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
++} CLzmaDec;
++
++#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; }
++
++void LzmaDec_Init(CLzmaDec *p);
++
++/* There are two types of LZMA streams:
++ 0) Stream with end mark. That end mark adds about 6 bytes to compressed size.
++ 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */
++
++typedef enum
++{
++ LZMA_FINISH_ANY, /* finish at any point */
++ LZMA_FINISH_END /* block must be finished at the end */
++} ELzmaFinishMode;
++
++/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!!
++
++ You must use LZMA_FINISH_END, when you know that current output buffer
++ covers last bytes of block. In other cases you must use LZMA_FINISH_ANY.
++
++ If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK,
++ and output value of destLen will be less than output buffer size limit.
++ You can check status result also.
++
++ You can use multiple checks to test data integrity after full decompression:
++ 1) Check Result and "status" variable.
++ 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize.
++ 3) Check that output(srcLen) = compressedSize, if you know real compressedSize.
++ You must use correct finish mode in that case. */
++
++typedef enum
++{
++ LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */
++ LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */
++ LZMA_STATUS_NOT_FINISHED, /* stream was not finished */
++ LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */
++} ELzmaStatus;
++
++/* ELzmaStatus is used only as output value for function call */
++
++
++/* ---------- Interfaces ---------- */
++
++/* There are 3 levels of interfaces:
++ 1) Dictionary Interface
++ 2) Buffer Interface
++ 3) One Call Interface
++ You can select any of these interfaces, but don't mix functions from different
++ groups for same object. */
++
++
++/* There are two variants to allocate state for Dictionary Interface:
++ 1) LzmaDec_Allocate / LzmaDec_Free
++ 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs
++ You can use variant 2, if you set dictionary buffer manually.
++ For Buffer Interface you must always use variant 1.
++
++LzmaDec_Allocate* can return:
++ SZ_OK
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++*/
++
++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc);
++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc);
++
++SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc);
++void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc);
++
++/* ---------- Dictionary Interface ---------- */
++
++/* You can use it, if you want to eliminate the overhead for data copying from
++ dictionary to some other external buffer.
++ You must work with CLzmaDec variables directly in this interface.
++
++ STEPS:
++ LzmaDec_Constr()
++ LzmaDec_Allocate()
++ for (each new stream)
++ {
++ LzmaDec_Init()
++ while (it needs more decompression)
++ {
++ LzmaDec_DecodeToDic()
++ use data from CLzmaDec::dic and update CLzmaDec::dicPos
++ }
++ }
++ LzmaDec_Free()
++*/
++
++/* LzmaDec_DecodeToDic
++
++ The decoding to internal dictionary buffer (CLzmaDec::dic).
++ You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!!
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (dicLimit).
++ LZMA_FINISH_ANY - Decode just dicLimit bytes.
++ LZMA_FINISH_END - Stream must be finished after dicLimit.
++
++Returns:
++ SZ_OK
++ status:
++ LZMA_STATUS_FINISHED_WITH_MARK
++ LZMA_STATUS_NOT_FINISHED
++ LZMA_STATUS_NEEDS_MORE_INPUT
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
++ SZ_ERROR_DATA - Data error
++*/
++
++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit,
++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
++
++
++/* ---------- Buffer Interface ---------- */
++
++/* It's zlib-like interface.
++ See LzmaDec_DecodeToDic description for information about STEPS and return results,
++ but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need
++ to work with CLzmaDec variables manually.
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (*destLen).
++ LZMA_FINISH_ANY - Decode just destLen bytes.
++ LZMA_FINISH_END - Stream must be finished after (*destLen).
++*/
++
++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen,
++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
++
++
++/* ---------- One Call Interface ---------- */
++
++/* LzmaDecode
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (*destLen).
++ LZMA_FINISH_ANY - Decode just destLen bytes.
++ LZMA_FINISH_END - Stream must be finished after (*destLen).
++
++Returns:
++ SZ_OK
++ status:
++ LZMA_STATUS_FINISHED_WITH_MARK
++ LZMA_STATUS_NOT_FINISHED
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
++ SZ_ERROR_DATA - Data error
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
++*/
++
++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
++ ELzmaStatus *status, ISzAlloc *alloc);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzmaEnc.h
+@@ -0,0 +1,80 @@
++/* LzmaEnc.h -- LZMA Encoder
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZMA_ENC_H
++#define __LZMA_ENC_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define LZMA_PROPS_SIZE 5
++
++typedef struct _CLzmaEncProps
++{
++ int level; /* 0 <= level <= 9 */
++ UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
++ (1 << 12) <= dictSize <= (1 << 30) for 64-bit version
++ default = (1 << 24) */
++ int lc; /* 0 <= lc <= 8, default = 3 */
++ int lp; /* 0 <= lp <= 4, default = 0 */
++ int pb; /* 0 <= pb <= 4, default = 2 */
++ int algo; /* 0 - fast, 1 - normal, default = 1 */
++ int fb; /* 5 <= fb <= 273, default = 32 */
++ int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */
++ int numHashBytes; /* 2, 3 or 4, default = 4 */
++ UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */
++ unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
++ int numThreads; /* 1 or 2, default = 2 */
++} CLzmaEncProps;
++
++void LzmaEncProps_Init(CLzmaEncProps *p);
++void LzmaEncProps_Normalize(CLzmaEncProps *p);
++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2);
++
++
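
A short sketch of how these are meant to be used: fill the structure with LzmaEncProps_Init(), override only the fields you care about, then let LzmaEncProps_Normalize() resolve the remaining -1/0 defaults.

static void setup_props(CLzmaEncProps *props)
{
    LzmaEncProps_Init(props);
    props->level = 9;                /* override only what you need */
    props->dictSize = 1 << 20;       /* 1 MiB dictionary */
    props->writeEndMark = 1;         /* emit an end-of-payload marker */
    LzmaEncProps_Normalize(props);   /* derives lc/lp/pb/fb/btMode/... from level */
}
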
++/* ---------- CLzmaEncHandle Interface ---------- */
++
++/* LzmaEnc_* functions can return the following exit codes:
++Returns:
++ SZ_OK - OK
++ SZ_ERROR_MEM - Memory allocation error
++  SZ_ERROR_PARAM - Incorrect parameter in props
++ SZ_ERROR_WRITE - Write callback error.
++ SZ_ERROR_PROGRESS - some break from progress callback
++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
++*/
++
++typedef void * CLzmaEncHandle;
++
++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc);
++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig);
++SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
++SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
++SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++
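
A sketch of the handle-based flow for in-memory compression; it assumes a NULL ICompressProgress pointer is acceptable (no progress reporting wanted) and that props was prepared as in the CLzmaEncProps sketch above:

#include "LzmaEnc.h"

static SRes mem_compress(Byte *dst, SizeT *dstLen, const Byte *src, SizeT srcLen,
                         const CLzmaEncProps *props, Byte propsOut[LZMA_PROPS_SIZE],
                         ISzAlloc *alloc, ISzAlloc *allocBig)
{
    SRes res;
    SizeT propsSize = LZMA_PROPS_SIZE;
    CLzmaEncHandle enc = LzmaEnc_Create(alloc);
    if (enc == 0)
        return SZ_ERROR_MEM;
    res = LzmaEnc_SetProps(enc, props);
    if (res == SZ_OK)
        res = LzmaEnc_WriteProperties(enc, propsOut, &propsSize); /* the 5 header bytes */
    if (res == SZ_OK)
        res = LzmaEnc_MemEncode(enc, dst, dstLen, src, srcLen,
                                (int)props->writeEndMark, NULL, alloc, allocBig);
    LzmaEnc_Destroy(enc, alloc, allocBig);
    return res;
}
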
++/* ---------- One Call Interface ---------- */
++
++/* LzmaEncode
++Return code:
++ SZ_OK - OK
++ SZ_ERROR_MEM - Memory allocation error
++  SZ_ERROR_PARAM - Incorrect parameter
++ SZ_ERROR_OUTPUT_EOF - output buffer overflow
++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
++*/
++
++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
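
For comparison with the handle-based flow, a one-call sketch using default properties (again assuming a NULL progress callback is acceptable); with LzmaEncProps_Init() alone, normalization yields level 5 and a 16 MiB dictionary:

static SRes compress_once(Byte *dst, SizeT *dstLen, const Byte *src, SizeT srcLen,
                          Byte propsEncoded[LZMA_PROPS_SIZE], SizeT *propsSize,
                          ISzAlloc *alloc, ISzAlloc *allocBig)
{
    CLzmaEncProps props;
    LzmaEncProps_Init(&props);
    *propsSize = LZMA_PROPS_SIZE;
    return LzmaEncode(dst, dstLen, src, srcLen, &props, propsEncoded, propsSize,
                      0 /* writeEndMark */, NULL /* progress */, alloc, allocBig);
}
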
+--- /dev/null
++++ b/include/linux/lzma/Types.h
+@@ -0,0 +1,226 @@
++/* Types.h -- Basic types
++2009-11-23 : Igor Pavlov : Public domain */
++
++#ifndef __7Z_TYPES_H
++#define __7Z_TYPES_H
++
++#include <stddef.h>
++
++#ifdef _WIN32
++#include <windows.h>
++#endif
++
++#ifndef EXTERN_C_BEGIN
++#ifdef __cplusplus
++#define EXTERN_C_BEGIN extern "C" {
++#define EXTERN_C_END }
++#else
++#define EXTERN_C_BEGIN
++#define EXTERN_C_END
++#endif
++#endif
++
++EXTERN_C_BEGIN
++
++#define SZ_OK 0
++
++#define SZ_ERROR_DATA 1
++#define SZ_ERROR_MEM 2
++#define SZ_ERROR_CRC 3
++#define SZ_ERROR_UNSUPPORTED 4
++#define SZ_ERROR_PARAM 5
++#define SZ_ERROR_INPUT_EOF 6
++#define SZ_ERROR_OUTPUT_EOF 7
++#define SZ_ERROR_READ 8
++#define SZ_ERROR_WRITE 9
++#define SZ_ERROR_PROGRESS 10
++#define SZ_ERROR_FAIL 11
++#define SZ_ERROR_THREAD 12
++
++#define SZ_ERROR_ARCHIVE 16
++#define SZ_ERROR_NO_ARCHIVE 17
++
++typedef int SRes;
++
++#ifdef _WIN32
++typedef DWORD WRes;
++#else
++typedef int WRes;
++#endif
++
++#ifndef RINOK
++#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }
++#endif
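
RINOK is the error-propagation helper used throughout this code: it evaluates an SRes expression and returns early from the enclosing function on any non-zero result. A tiny sketch with hypothetical steps:

extern SRes step_one(void);   /* hypothetical */
extern SRes step_two(void);   /* hypothetical */

static SRes run_pipeline(void)
{
    RINOK(step_one());   /* on failure, returns step_one()'s error code immediately */
    RINOK(step_two());
    return SZ_OK;
}
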
++
++typedef unsigned char Byte;
++typedef short Int16;
++typedef unsigned short UInt16;
++
++#ifdef _LZMA_UINT32_IS_ULONG
++typedef long Int32;
++typedef unsigned long UInt32;
++#else
++typedef int Int32;
++typedef unsigned int UInt32;
++#endif
++
++#ifdef _SZ_NO_INT_64
++
++/* Define _SZ_NO_INT_64 if your compiler doesn't support 64-bit integers.
++   NOTE: some code will work incorrectly in that case! */
++
++typedef long Int64;
++typedef unsigned long UInt64;
++
++#else
++
++#if defined(_MSC_VER) || defined(__BORLANDC__)
++typedef __int64 Int64;
++typedef unsigned __int64 UInt64;
++#else
++typedef long long int Int64;
++typedef unsigned long long int UInt64;
++#endif
++
++#endif
++
++#ifdef _LZMA_NO_SYSTEM_SIZE_T
++typedef UInt32 SizeT;
++#else
++typedef size_t SizeT;
++#endif
++
++typedef int Bool;
++#define True 1
++#define False 0
++
++
++#ifdef _WIN32
++#define MY_STD_CALL __stdcall
++#else
++#define MY_STD_CALL
++#endif
++
++#ifdef _MSC_VER
++
++#if _MSC_VER >= 1300
++#define MY_NO_INLINE __declspec(noinline)
++#else
++#define MY_NO_INLINE
++#endif
++
++#define MY_CDECL __cdecl
++#define MY_FAST_CALL __fastcall
++
++#else
++
++#define MY_CDECL
++#define MY_FAST_CALL
++
++#endif
++
++
++/* The following interfaces use the first parameter as a pointer to the structure */
++
++typedef struct
++{
++ SRes (*Read)(void *p, void *buf, size_t *size);
++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
++ (output(*size) < input(*size)) is allowed */
++} ISeqInStream;
++
++/* it can return SZ_ERROR_INPUT_EOF */
++SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size);
++SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType);
++SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf);
++
++typedef struct
++{
++ size_t (*Write)(void *p, const void *buf, size_t size);
++ /* Returns: result - the number of actually written bytes.
++ (result < size) means error */
++} ISeqOutStream;
++
++typedef enum
++{
++ SZ_SEEK_SET = 0,
++ SZ_SEEK_CUR = 1,
++ SZ_SEEK_END = 2
++} ESzSeek;
++
++typedef struct
++{
++ SRes (*Read)(void *p, void *buf, size_t *size); /* same as ISeqInStream::Read */
++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
++} ISeekInStream;
++
++typedef struct
++{
++ SRes (*Look)(void *p, void **buf, size_t *size);
++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
++ (output(*size) > input(*size)) is not allowed
++ (output(*size) < input(*size)) is allowed */
++ SRes (*Skip)(void *p, size_t offset);
++ /* offset must be <= output(*size) of Look */
++
++ SRes (*Read)(void *p, void *buf, size_t *size);
++  /* reads directly (without a buffer); it's the same as ISeqInStream::Read */
++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
++} ILookInStream;
++
++SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size);
++SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset);
++
++/* reads via ILookInStream::Read */
++SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType);
++SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size);
++
++#define LookToRead_BUF_SIZE (1 << 14)
++
++typedef struct
++{
++ ILookInStream s;
++ ISeekInStream *realStream;
++ size_t pos;
++ size_t size;
++ Byte buf[LookToRead_BUF_SIZE];
++} CLookToRead;
++
++void LookToRead_CreateVTable(CLookToRead *p, int lookahead);
++void LookToRead_Init(CLookToRead *p);
++
++typedef struct
++{
++ ISeqInStream s;
++ ILookInStream *realStream;
++} CSecToLook;
++
++void SecToLook_CreateVTable(CSecToLook *p);
++
++typedef struct
++{
++ ISeqInStream s;
++ ILookInStream *realStream;
++} CSecToRead;
++
++void SecToRead_CreateVTable(CSecToRead *p);
++
++typedef struct
++{
++ SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize);
++ /* Returns: result. (result != SZ_OK) means break.
++ Value (UInt64)(Int64)-1 for size means unknown value. */
++} ICompressProgress;
++
++typedef struct
++{
++ void *(*Alloc)(void *p, size_t size);
++ void (*Free)(void *p, void *address); /* address can be 0 */
++} ISzAlloc;
++
++#define IAlloc_Alloc(p, size) (p)->Alloc((p), size)
++#define IAlloc_Free(p, a) (p)->Free((p), a)
++
++EXTERN_C_END
++
++#endif
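
Every allocating entry point in these headers takes an ISzAlloc pair of callbacks instead of calling an allocator directly. A minimal userspace-flavoured adapter might look like the following (a kernel user would wire the callbacks to kmalloc()/kfree() or vmalloc()/vfree() instead):

#include <stdlib.h>
#include "Types.h"

static void *SzAlloc(void *p, size_t size) { (void)p; return malloc(size); }
static void SzFree(void *p, void *address) { (void)p; free(address); } /* address may be 0 */

static ISzAlloc lzma_alloc = { SzAlloc, SzFree };

/* e.g.: LzmaDec_Allocate(&dec, propData, LZMA_PROPS_SIZE, &lzma_alloc); */
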
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -115,6 +115,12 @@ config LZO_DECOMPRESS
+
+ source "lib/xz/Kconfig"
+
++config LZMA_COMPRESS
++ tristate
++
++config LZMA_DECOMPRESS
++ tristate
++
+ #
+ # These all provide a common interface (hence the apparent duplication with
+ # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -2,6 +2,16 @@
+ # Makefile for some libs needed in the kernel.
+ #
+
++ifdef CONFIG_JFFS2_ZLIB
++ CONFIG_ZLIB_INFLATE:=y
++ CONFIG_ZLIB_DEFLATE:=y
++endif
++
++ifdef CONFIG_JFFS2_LZMA
++ CONFIG_LZMA_DECOMPRESS:=y
++ CONFIG_LZMA_COMPRESS:=y
++endif
++
+ ifdef CONFIG_FUNCTION_TRACER
+ ORIG_CFLAGS := $(KBUILD_CFLAGS)
+ KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
+@@ -73,6 +83,8 @@ obj-$(CONFIG_LZO_COMPRESS) += lzo/
+ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
+ obj-$(CONFIG_XZ_DEC) += xz/
+ obj-$(CONFIG_RAID6_PQ) += raid6/
++obj-$(CONFIG_LZMA_COMPRESS) += lzma/
++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma/
+
+ lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
+ lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
+--- /dev/null
++++ b/lib/lzma/LzFind.c
+@@ -0,0 +1,761 @@
++/* LzFind.c -- Match finder for LZ algorithms
++2009-04-22 : Igor Pavlov : Public domain */
++
++#include <string.h>
++
++#include "LzFind.h"
++#include "LzHash.h"
++
++#define kEmptyHashValue 0
++#define kMaxValForNormalize ((UInt32)0xFFFFFFFF)
++#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */
++#define kNormalizeMask (~(kNormalizeStepMin - 1))
++#define kMaxHistorySize ((UInt32)3 << 30)
++
++#define kStartMaxLen 3
++
++static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc)
++{
++ if (!p->directInput)
++ {
++ alloc->Free(alloc, p->bufferBase);
++ p->bufferBase = 0;
++ }
++}
++
++/* (keepSizeBefore + keepSizeAfter + keepSizeReserv) must be < 4G */
++
++static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc)
++{
++ UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv;
++ if (p->directInput)
++ {
++ p->blockSize = blockSize;
++ return 1;
++ }
++ if (p->bufferBase == 0 || p->blockSize != blockSize)
++ {
++ LzInWindow_Free(p, alloc);
++ p->blockSize = blockSize;
++ p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize);
++ }
++ return (p->bufferBase != 0);
++}
++
++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
++Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
++
++UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
++
++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
++{
++ p->posLimit -= subValue;
++ p->pos -= subValue;
++ p->streamPos -= subValue;
++}
++
++static void MatchFinder_ReadBlock(CMatchFinder *p)
++{
++ if (p->streamEndWasReached || p->result != SZ_OK)
++ return;
++ if (p->directInput)
++ {
++ UInt32 curSize = 0xFFFFFFFF - p->streamPos;
++ if (curSize > p->directInputRem)
++ curSize = (UInt32)p->directInputRem;
++ p->directInputRem -= curSize;
++ p->streamPos += curSize;
++ if (p->directInputRem == 0)
++ p->streamEndWasReached = 1;
++ return;
++ }
++ for (;;)
++ {
++ Byte *dest = p->buffer + (p->streamPos - p->pos);
++ size_t size = (p->bufferBase + p->blockSize - dest);
++ if (size == 0)
++ return;
++ p->result = p->stream->Read(p->stream, dest, &size);
++ if (p->result != SZ_OK)
++ return;
++ if (size == 0)
++ {
++ p->streamEndWasReached = 1;
++ return;
++ }
++ p->streamPos += (UInt32)size;
++ if (p->streamPos - p->pos > p->keepSizeAfter)
++ return;
++ }
++}
++
++void MatchFinder_MoveBlock(CMatchFinder *p)
++{
++ memmove(p->bufferBase,
++ p->buffer - p->keepSizeBefore,
++ (size_t)(p->streamPos - p->pos + p->keepSizeBefore));
++ p->buffer = p->bufferBase + p->keepSizeBefore;
++}
++
++int MatchFinder_NeedMove(CMatchFinder *p)
++{
++ if (p->directInput)
++ return 0;
++ /* if (p->streamEndWasReached) return 0; */
++ return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
++}
++
++void MatchFinder_ReadIfRequired(CMatchFinder *p)
++{
++ if (p->streamEndWasReached)
++ return;
++ if (p->keepSizeAfter >= p->streamPos - p->pos)
++ MatchFinder_ReadBlock(p);
++}
++
++static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p)
++{
++ if (MatchFinder_NeedMove(p))
++ MatchFinder_MoveBlock(p);
++ MatchFinder_ReadBlock(p);
++}
++
++static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
++{
++ p->cutValue = 32;
++ p->btMode = 1;
++ p->numHashBytes = 4;
++ p->bigHash = 0;
++}
++
++#define kCrcPoly 0xEDB88320
++
++void MatchFinder_Construct(CMatchFinder *p)
++{
++ UInt32 i;
++ p->bufferBase = 0;
++ p->directInput = 0;
++ p->hash = 0;
++ MatchFinder_SetDefaultSettings(p);
++
++ for (i = 0; i < 256; i++)
++ {
++ UInt32 r = i;
++ int j;
++ for (j = 0; j < 8; j++)
++ r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1));
++ p->crc[i] = r;
++ }
++}
++
++static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->hash);
++ p->hash = 0;
++}
++
++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc)
++{
++ MatchFinder_FreeThisClassMemory(p, alloc);
++ LzInWindow_Free(p, alloc);
++}
++
++static CLzRef* AllocRefs(UInt32 num, ISzAlloc *alloc)
++{
++ size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
++ if (sizeInBytes / sizeof(CLzRef) != num)
++ return 0;
++ return (CLzRef *)alloc->Alloc(alloc, sizeInBytes);
++}
++
++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
++ ISzAlloc *alloc)
++{
++ UInt32 sizeReserv;
++ if (historySize > kMaxHistorySize)
++ {
++ MatchFinder_Free(p, alloc);
++ return 0;
++ }
++ sizeReserv = historySize >> 1;
++ if (historySize > ((UInt32)2 << 30))
++ sizeReserv = historySize >> 2;
++ sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19);
++
++ p->keepSizeBefore = historySize + keepAddBufferBefore + 1;
++ p->keepSizeAfter = matchMaxLen + keepAddBufferAfter;
++  /* we need one additional byte, since we use MoveBlock after pos++ and before using the dictionary */
++ if (LzInWindow_Create(p, sizeReserv, alloc))
++ {
++ UInt32 newCyclicBufferSize = historySize + 1;
++ UInt32 hs;
++ p->matchMaxLen = matchMaxLen;
++ {
++ p->fixedHashSize = 0;
++ if (p->numHashBytes == 2)
++ hs = (1 << 16) - 1;
++ else
++ {
++ hs = historySize - 1;
++ hs |= (hs >> 1);
++ hs |= (hs >> 2);
++ hs |= (hs >> 4);
++ hs |= (hs >> 8);
++ hs >>= 1;
++ hs |= 0xFFFF; /* don't change it! It's required for Deflate */
++ if (hs > (1 << 24))
++ {
++ if (p->numHashBytes == 3)
++ hs = (1 << 24) - 1;
++ else
++ hs >>= 1;
++ }
++ }
++ p->hashMask = hs;
++ hs++;
++ if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size;
++ if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size;
++ if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size;
++ hs += p->fixedHashSize;
++ }
++
++ {
++ UInt32 prevSize = p->hashSizeSum + p->numSons;
++ UInt32 newSize;
++ p->historySize = historySize;
++ p->hashSizeSum = hs;
++ p->cyclicBufferSize = newCyclicBufferSize;
++ p->numSons = (p->btMode ? newCyclicBufferSize * 2 : newCyclicBufferSize);
++ newSize = p->hashSizeSum + p->numSons;
++ if (p->hash != 0 && prevSize == newSize)
++ return 1;
++ MatchFinder_FreeThisClassMemory(p, alloc);
++ p->hash = AllocRefs(newSize, alloc);
++ if (p->hash != 0)
++ {
++ p->son = p->hash + p->hashSizeSum;
++ return 1;
++ }
++ }
++ }
++ MatchFinder_Free(p, alloc);
++ return 0;
++}
++
++static void MatchFinder_SetLimits(CMatchFinder *p)
++{
++ UInt32 limit = kMaxValForNormalize - p->pos;
++ UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos;
++ if (limit2 < limit)
++ limit = limit2;
++ limit2 = p->streamPos - p->pos;
++ if (limit2 <= p->keepSizeAfter)
++ {
++ if (limit2 > 0)
++ limit2 = 1;
++ }
++ else
++ limit2 -= p->keepSizeAfter;
++ if (limit2 < limit)
++ limit = limit2;
++ {
++ UInt32 lenLimit = p->streamPos - p->pos;
++ if (lenLimit > p->matchMaxLen)
++ lenLimit = p->matchMaxLen;
++ p->lenLimit = lenLimit;
++ }
++ p->posLimit = p->pos + limit;
++}
++
++void MatchFinder_Init(CMatchFinder *p)
++{
++ UInt32 i;
++ for (i = 0; i < p->hashSizeSum; i++)
++ p->hash[i] = kEmptyHashValue;
++ p->cyclicBufferPos = 0;
++ p->buffer = p->bufferBase;
++ p->pos = p->streamPos = p->cyclicBufferSize;
++ p->result = SZ_OK;
++ p->streamEndWasReached = 0;
++ MatchFinder_ReadBlock(p);
++ MatchFinder_SetLimits(p);
++}
++
++static UInt32 MatchFinder_GetSubValue(CMatchFinder *p)
++{
++ return (p->pos - p->historySize - 1) & kNormalizeMask;
++}
++
++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
++{
++ UInt32 i;
++ for (i = 0; i < numItems; i++)
++ {
++ UInt32 value = items[i];
++ if (value <= subValue)
++ value = kEmptyHashValue;
++ else
++ value -= subValue;
++ items[i] = value;
++ }
++}
++
++static void MatchFinder_Normalize(CMatchFinder *p)
++{
++ UInt32 subValue = MatchFinder_GetSubValue(p);
++ MatchFinder_Normalize3(subValue, p->hash, p->hashSizeSum + p->numSons);
++ MatchFinder_ReduceOffsets(p, subValue);
++}
++
++static void MatchFinder_CheckLimits(CMatchFinder *p)
++{
++ if (p->pos == kMaxValForNormalize)
++ MatchFinder_Normalize(p);
++ if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos)
++ MatchFinder_CheckAndMoveAndRead(p);
++ if (p->cyclicBufferPos == p->cyclicBufferSize)
++ p->cyclicBufferPos = 0;
++ MatchFinder_SetLimits(p);
++}
++
++static UInt32 * Hc_GetMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
++ UInt32 *distances, UInt32 maxLen)
++{
++ son[_cyclicBufferPos] = curMatch;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ return distances;
++ {
++ const Byte *pb = cur - delta;
++ curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
++ if (pb[maxLen] == cur[maxLen] && *pb == *cur)
++ {
++ UInt32 len = 0;
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ if (maxLen < len)
++ {
++ *distances++ = maxLen = len;
++ *distances++ = delta - 1;
++ if (len == lenLimit)
++ return distances;
++ }
++ }
++ }
++ }
++}
++
++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
++ UInt32 *distances, UInt32 maxLen)
++{
++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
++ UInt32 len0 = 0, len1 = 0;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ {
++ *ptr0 = *ptr1 = kEmptyHashValue;
++ return distances;
++ }
++ {
++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
++ const Byte *pb = cur - delta;
++ UInt32 len = (len0 < len1 ? len0 : len1);
++ if (pb[len] == cur[len])
++ {
++ if (++len != lenLimit && pb[len] == cur[len])
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ if (maxLen < len)
++ {
++ *distances++ = maxLen = len;
++ *distances++ = delta - 1;
++ if (len == lenLimit)
++ {
++ *ptr1 = pair[0];
++ *ptr0 = pair[1];
++ return distances;
++ }
++ }
++ }
++ if (pb[len] < cur[len])
++ {
++ *ptr1 = curMatch;
++ ptr1 = pair + 1;
++ curMatch = *ptr1;
++ len1 = len;
++ }
++ else
++ {
++ *ptr0 = curMatch;
++ ptr0 = pair;
++ curMatch = *ptr0;
++ len0 = len;
++ }
++ }
++ }
++}
++
++static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
++{
++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
++ UInt32 len0 = 0, len1 = 0;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ {
++ *ptr0 = *ptr1 = kEmptyHashValue;
++ return;
++ }
++ {
++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
++ const Byte *pb = cur - delta;
++ UInt32 len = (len0 < len1 ? len0 : len1);
++ if (pb[len] == cur[len])
++ {
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ {
++ if (len == lenLimit)
++ {
++ *ptr1 = pair[0];
++ *ptr0 = pair[1];
++ return;
++ }
++ }
++ }
++ if (pb[len] < cur[len])
++ {
++ *ptr1 = curMatch;
++ ptr1 = pair + 1;
++ curMatch = *ptr1;
++ len1 = len;
++ }
++ else
++ {
++ *ptr0 = curMatch;
++ ptr0 = pair;
++ curMatch = *ptr0;
++ len0 = len;
++ }
++ }
++ }
++}
++
++#define MOVE_POS \
++ ++p->cyclicBufferPos; \
++ p->buffer++; \
++ if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p);
++
++#define MOVE_POS_RET MOVE_POS return offset;
++
++static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; }
++
++#define GET_MATCHES_HEADER2(minLen, ret_op) \
++ UInt32 lenLimit; UInt32 hashValue; const Byte *cur; UInt32 curMatch; \
++ lenLimit = p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
++ cur = p->buffer;
++
++#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0)
++#define SKIP_HEADER(minLen) GET_MATCHES_HEADER2(minLen, continue)
++
++#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
++
++#define GET_MATCHES_FOOTER(offset, maxLen) \
++ offset = (UInt32)(GetMatchesSpec1(lenLimit, curMatch, MF_PARAMS(p), \
++ distances + offset, maxLen) - distances); MOVE_POS_RET;
++
++#define SKIP_FOOTER \
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS;
++
++static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(2)
++ HASH2_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = 0;
++ GET_MATCHES_FOOTER(offset, 1)
++}
++
++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = 0;
++ GET_MATCHES_FOOTER(offset, 2)
++}
++
++static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, delta2, maxLen, offset;
++ GET_MATCHES_HEADER(3)
++
++ HASH3_CALC;
++
++ delta2 = p->pos - p->hash[hash2Value];
++ curMatch = p->hash[kFix3HashSize + hashValue];
++
++ p->hash[hash2Value] =
++ p->hash[kFix3HashSize + hashValue] = p->pos;
++
++
++ maxLen = 2;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[0] = maxLen;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ if (maxLen == lenLimit)
++ {
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
++ MOVE_POS_RET;
++ }
++ }
++ GET_MATCHES_FOOTER(offset, maxLen)
++}
++
++static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
++ GET_MATCHES_HEADER(4)
++
++ HASH4_CALC;
++
++ delta2 = p->pos - p->hash[ hash2Value];
++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
++ curMatch = p->hash[kFix4HashSize + hashValue];
++
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++
++ maxLen = 1;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ distances[0] = maxLen = 2;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ }
++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
++ {
++ maxLen = 3;
++ distances[offset + 1] = delta3 - 1;
++ offset += 2;
++ delta2 = delta3;
++ }
++ if (offset != 0)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[offset - 2] = maxLen;
++ if (maxLen == lenLimit)
++ {
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
++ MOVE_POS_RET;
++ }
++ }
++ if (maxLen < 3)
++ maxLen = 3;
++ GET_MATCHES_FOOTER(offset, maxLen)
++}
++
++static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
++ GET_MATCHES_HEADER(4)
++
++ HASH4_CALC;
++
++ delta2 = p->pos - p->hash[ hash2Value];
++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
++ curMatch = p->hash[kFix4HashSize + hashValue];
++
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++
++ maxLen = 1;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ distances[0] = maxLen = 2;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ }
++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
++ {
++ maxLen = 3;
++ distances[offset + 1] = delta3 - 1;
++ offset += 2;
++ delta2 = delta3;
++ }
++ if (offset != 0)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[offset - 2] = maxLen;
++ if (maxLen == lenLimit)
++ {
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS_RET;
++ }
++ }
++ if (maxLen < 3)
++ maxLen = 3;
++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
++ distances + offset, maxLen) - (distances));
++ MOVE_POS_RET
++}
++
++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
++ distances, 2) - (distances));
++ MOVE_POS_RET
++}
++
++static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(2)
++ HASH2_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value;
++ SKIP_HEADER(3)
++ HASH3_CALC;
++ curMatch = p->hash[kFix3HashSize + hashValue];
++ p->hash[hash2Value] =
++ p->hash[kFix3HashSize + hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value, hash3Value;
++ SKIP_HEADER(4)
++ HASH4_CALC;
++ curMatch = p->hash[kFix4HashSize + hashValue];
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] = p->pos;
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value, hash3Value;
++ SKIP_HEADER(4)
++ HASH4_CALC;
++ curMatch = p->hash[kFix4HashSize + hashValue];
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS
++ }
++ while (--num != 0);
++}
++
++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS
++ }
++ while (--num != 0);
++}
++
++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable)
++{
++ vTable->Init = (Mf_Init_Func)MatchFinder_Init;
++ vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinder_GetIndexByte;
++ vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
++ vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
++ if (!p->btMode)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip;
++ }
++ else if (p->numHashBytes == 2)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip;
++ }
++ else if (p->numHashBytes == 3)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip;
++ }
++ else
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
++ }
++}
+--- /dev/null
++++ b/lib/lzma/LzmaDec.c
+@@ -0,0 +1,999 @@
++/* LzmaDec.c -- LZMA Decoder
++2009-09-20 : Igor Pavlov : Public domain */
++
++#include "LzmaDec.h"
++
++#include <string.h>
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++
++#define RC_INIT_SIZE 5
++
++#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
++
++#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
++#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
++#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
++#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
++ { UPDATE_0(p); i = (i + i); A0; } else \
++ { UPDATE_1(p); i = (i + i) + 1; A1; }
++#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;)
++
++#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); }
++#define TREE_DECODE(probs, limit, i) \
++ { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
++
++/* #define _LZMA_SIZE_OPT */
++
++#ifdef _LZMA_SIZE_OPT
++#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
++#else
++#define TREE_6_DECODE(probs, i) \
++ { i = 1; \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ i -= 0x40; }
++#endif
++
++#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); }
++
++#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
++#define UPDATE_0_CHECK range = bound;
++#define UPDATE_1_CHECK range -= bound; code -= bound;
++#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
++ { UPDATE_0_CHECK; i = (i + i); A0; } else \
++ { UPDATE_1_CHECK; i = (i + i) + 1; A1; }
++#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
++#define TREE_DECODE_CHECK(probs, limit, i) \
++ { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
++
++
++#define kNumPosBitsMax 4
++#define kNumPosStatesMax (1 << kNumPosBitsMax)
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define LenChoice 0
++#define LenChoice2 (LenChoice + 1)
++#define LenLow (LenChoice2 + 1)
++#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
++#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
++#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
++
++
++#define kNumStates 12
++#define kNumLitStates 7
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#define kNumPosSlotBits 6
++#define kNumLenToPosStates 4
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++
++#define kMatchMinLen 2
++#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
++
++#define IsMatch 0
++#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
++#define IsRepG0 (IsRep + kNumStates)
++#define IsRepG1 (IsRepG0 + kNumStates)
++#define IsRepG2 (IsRepG1 + kNumStates)
++#define IsRep0Long (IsRepG2 + kNumStates)
++#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
++#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
++#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
++#define LenCoder (Align + kAlignTableSize)
++#define RepLenCoder (LenCoder + kNumLenProbs)
++#define Literal (RepLenCoder + kNumLenProbs)
++
++#define LZMA_BASE_SIZE 1846
++#define LZMA_LIT_SIZE 768
++
++#define LzmaProps_GetNumProbs(p) ((UInt32)LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))
++
++#if Literal != LZMA_BASE_SIZE
++StopCompilingDueBUG
++#endif
++
++#define LZMA_DIC_MIN (1 << 12)
++
++/* The first LZMA symbol is always decoded.
++It then decodes new LZMA symbols while (buf < bufLimit), but "buf" does not account for the last normalization.
++Out:
++ Result:
++ SZ_OK - OK
++ SZ_ERROR_DATA - Error
++ p->remainLen:
++ < kMatchSpecLenStart : normal remain
++ = kMatchSpecLenStart : finished
++ = kMatchSpecLenStart + 1 : Flush marker
++ = kMatchSpecLenStart + 2 : State Init Marker
++*/
++
++static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
++{
++ CLzmaProb *probs = p->probs;
++
++ unsigned state = p->state;
++ UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];
++ unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;
++ unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1;
++ unsigned lc = p->prop.lc;
++
++ Byte *dic = p->dic;
++ SizeT dicBufSize = p->dicBufSize;
++ SizeT dicPos = p->dicPos;
++
++ UInt32 processedPos = p->processedPos;
++ UInt32 checkDicSize = p->checkDicSize;
++ unsigned len = 0;
++
++ const Byte *buf = p->buf;
++ UInt32 range = p->range;
++ UInt32 code = p->code;
++
++ do
++ {
++ CLzmaProb *prob;
++ UInt32 bound;
++ unsigned ttt;
++ unsigned posState = processedPos & pbMask;
++
++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0(prob)
++ {
++ unsigned symbol;
++ UPDATE_0(prob);
++ prob = probs + Literal;
++ if (checkDicSize != 0 || processedPos != 0)
++ prob += (LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) +
++ (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc))));
++
++ if (state < kNumLitStates)
++ {
++ state -= (state < 4) ? state : 3;
++ symbol = 1;
++ do { GET_BIT(prob + symbol, symbol) } while (symbol < 0x100);
++ }
++ else
++ {
++ unsigned matchByte = p->dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ unsigned offs = 0x100;
++ state -= (state < 10) ? 3 : 6;
++ symbol = 1;
++ do
++ {
++ unsigned bit;
++ CLzmaProb *probLit;
++ matchByte <<= 1;
++ bit = (matchByte & offs);
++ probLit = prob + offs + bit + symbol;
++ GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit)
++ }
++ while (symbol < 0x100);
++ }
++ dic[dicPos++] = (Byte)symbol;
++ processedPos++;
++ continue;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ prob = probs + IsRep + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ state += kNumStates;
++ prob = probs + LenCoder;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ if (checkDicSize == 0 && processedPos == 0)
++ return SZ_ERROR_DATA;
++ prob = probs + IsRepG0 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ dicPos++;
++ processedPos++;
++ state = state < kNumLitStates ? 9 : 11;
++ continue;
++ }
++ UPDATE_1(prob);
++ }
++ else
++ {
++ UInt32 distance;
++ UPDATE_1(prob);
++ prob = probs + IsRepG1 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ distance = rep1;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ prob = probs + IsRepG2 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ distance = rep2;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ distance = rep3;
++ rep3 = rep2;
++ }
++ rep2 = rep1;
++ }
++ rep1 = rep0;
++ rep0 = distance;
++ }
++ state = state < kNumLitStates ? 8 : 11;
++ prob = probs + RepLenCoder;
++ }
++ {
++ unsigned limit, offset;
++ CLzmaProb *probLen = prob + LenChoice;
++ IF_BIT_0(probLen)
++ {
++ UPDATE_0(probLen);
++ probLen = prob + LenLow + (posState << kLenNumLowBits);
++ offset = 0;
++ limit = (1 << kLenNumLowBits);
++ }
++ else
++ {
++ UPDATE_1(probLen);
++ probLen = prob + LenChoice2;
++ IF_BIT_0(probLen)
++ {
++ UPDATE_0(probLen);
++ probLen = prob + LenMid + (posState << kLenNumMidBits);
++ offset = kLenNumLowSymbols;
++ limit = (1 << kLenNumMidBits);
++ }
++ else
++ {
++ UPDATE_1(probLen);
++ probLen = prob + LenHigh;
++ offset = kLenNumLowSymbols + kLenNumMidSymbols;
++ limit = (1 << kLenNumHighBits);
++ }
++ }
++ TREE_DECODE(probLen, limit, len);
++ len += offset;
++ }
++
++ if (state >= kNumStates)
++ {
++ UInt32 distance;
++ prob = probs + PosSlot +
++ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
++ TREE_6_DECODE(prob, distance);
++ if (distance >= kStartPosModelIndex)
++ {
++ unsigned posSlot = (unsigned)distance;
++ int numDirectBits = (int)(((distance >> 1) - 1));
++ distance = (2 | (distance & 1));
++ if (posSlot < kEndPosModelIndex)
++ {
++ distance <<= numDirectBits;
++ prob = probs + SpecPos + distance - posSlot - 1;
++ {
++ UInt32 mask = 1;
++ unsigned i = 1;
++ do
++ {
++ GET_BIT2(prob + i, i, ; , distance |= mask);
++ mask <<= 1;
++ }
++ while (--numDirectBits != 0);
++ }
++ }
++ else
++ {
++ numDirectBits -= kNumAlignBits;
++ do
++ {
++ NORMALIZE
++ range >>= 1;
++
++ {
++ UInt32 t;
++ code -= range;
++ t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */
++ distance = (distance << 1) + (t + 1);
++ code += range & t;
++ }
++ /*
++ distance <<= 1;
++ if (code >= range)
++ {
++ code -= range;
++ distance |= 1;
++ }
++ */
++ }
++ while (--numDirectBits != 0);
++ prob = probs + Align;
++ distance <<= kNumAlignBits;
++ {
++ unsigned i = 1;
++ GET_BIT2(prob + i, i, ; , distance |= 1);
++ GET_BIT2(prob + i, i, ; , distance |= 2);
++ GET_BIT2(prob + i, i, ; , distance |= 4);
++ GET_BIT2(prob + i, i, ; , distance |= 8);
++ }
++ if (distance == (UInt32)0xFFFFFFFF)
++ {
++ len += kMatchSpecLenStart;
++ state -= kNumStates;
++ break;
++ }
++ }
++ }
++ rep3 = rep2;
++ rep2 = rep1;
++ rep1 = rep0;
++ rep0 = distance + 1;
++ if (checkDicSize == 0)
++ {
++ if (distance >= processedPos)
++ return SZ_ERROR_DATA;
++ }
++ else if (distance >= checkDicSize)
++ return SZ_ERROR_DATA;
++ state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
++ }
++
++ len += kMatchMinLen;
++
++ if (limit == dicPos)
++ return SZ_ERROR_DATA;
++ {
++ SizeT rem = limit - dicPos;
++ unsigned curLen = ((rem < len) ? (unsigned)rem : len);
++ SizeT pos = (dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0);
++
++ processedPos += curLen;
++
++ len -= curLen;
++ if (pos + curLen <= dicBufSize)
++ {
++ Byte *dest = dic + dicPos;
++ ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos;
++ const Byte *lim = dest + curLen;
++ dicPos += curLen;
++ do
++ *(dest) = (Byte)*(dest + src);
++ while (++dest != lim);
++ }
++ else
++ {
++ do
++ {
++ dic[dicPos++] = dic[pos];
++ if (++pos == dicBufSize)
++ pos = 0;
++ }
++ while (--curLen != 0);
++ }
++ }
++ }
++ }
++ while (dicPos < limit && buf < bufLimit);
++ NORMALIZE;
++ p->buf = buf;
++ p->range = range;
++ p->code = code;
++ p->remainLen = len;
++ p->dicPos = dicPos;
++ p->processedPos = processedPos;
++ p->reps[0] = rep0;
++ p->reps[1] = rep1;
++ p->reps[2] = rep2;
++ p->reps[3] = rep3;
++ p->state = state;
++
++ return SZ_OK;
++}
++
++static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
++{
++ if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart)
++ {
++ Byte *dic = p->dic;
++ SizeT dicPos = p->dicPos;
++ SizeT dicBufSize = p->dicBufSize;
++ unsigned len = p->remainLen;
++ UInt32 rep0 = p->reps[0];
++ if (limit - dicPos < len)
++ len = (unsigned)(limit - dicPos);
++
++ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len)
++ p->checkDicSize = p->prop.dicSize;
++
++ p->processedPos += len;
++ p->remainLen -= len;
++ while (len-- != 0)
++ {
++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ dicPos++;
++ }
++ p->dicPos = dicPos;
++ }
++}
++
++static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
++{
++ do
++ {
++ SizeT limit2 = limit;
++ if (p->checkDicSize == 0)
++ {
++ UInt32 rem = p->prop.dicSize - p->processedPos;
++ if (limit - p->dicPos > rem)
++ limit2 = p->dicPos + rem;
++ }
++ RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit));
++ if (p->processedPos >= p->prop.dicSize)
++ p->checkDicSize = p->prop.dicSize;
++ LzmaDec_WriteRem(p, limit);
++ }
++ while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart);
++
++ if (p->remainLen > kMatchSpecLenStart)
++ {
++ p->remainLen = kMatchSpecLenStart;
++ }
++ return 0;
++}
++
++typedef enum
++{
++ DUMMY_ERROR, /* unexpected end of input stream */
++ DUMMY_LIT,
++ DUMMY_MATCH,
++ DUMMY_REP
++} ELzmaDummy;
++
++static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize)
++{
++ UInt32 range = p->range;
++ UInt32 code = p->code;
++ const Byte *bufLimit = buf + inSize;
++ CLzmaProb *probs = p->probs;
++ unsigned state = p->state;
++ ELzmaDummy res;
++
++ {
++ CLzmaProb *prob;
++ UInt32 bound;
++ unsigned ttt;
++ unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1);
++
++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK
++
++ /* if (bufLimit - buf >= 7) return DUMMY_LIT; */
++
++ prob = probs + Literal;
++ if (p->checkDicSize != 0 || p->processedPos != 0)
++ prob += (LZMA_LIT_SIZE *
++ ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) +
++ (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc))));
++
++ if (state < kNumLitStates)
++ {
++ unsigned symbol = 1;
++ do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100);
++ }
++ else
++ {
++ unsigned matchByte = p->dic[p->dicPos - p->reps[0] +
++ ((p->dicPos < p->reps[0]) ? p->dicBufSize : 0)];
++ unsigned offs = 0x100;
++ unsigned symbol = 1;
++ do
++ {
++ unsigned bit;
++ CLzmaProb *probLit;
++ matchByte <<= 1;
++ bit = (matchByte & offs);
++ probLit = prob + offs + bit + symbol;
++ GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit)
++ }
++ while (symbol < 0x100);
++ }
++ res = DUMMY_LIT;
++ }
++ else
++ {
++ unsigned len;
++ UPDATE_1_CHECK;
++
++ prob = probs + IsRep + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ state = 0;
++ prob = probs + LenCoder;
++ res = DUMMY_MATCH;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ res = DUMMY_REP;
++ prob = probs + IsRepG0 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ NORMALIZE_CHECK;
++ return DUMMY_REP;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ }
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ prob = probs + IsRepG1 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ prob = probs + IsRepG2 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ }
++ }
++ }
++ state = kNumStates;
++ prob = probs + RepLenCoder;
++ }
++ {
++ unsigned limit, offset;
++ CLzmaProb *probLen = prob + LenChoice;
++ IF_BIT_0_CHECK(probLen)
++ {
++ UPDATE_0_CHECK;
++ probLen = prob + LenLow + (posState << kLenNumLowBits);
++ offset = 0;
++ limit = 1 << kLenNumLowBits;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ probLen = prob + LenChoice2;
++ IF_BIT_0_CHECK(probLen)
++ {
++ UPDATE_0_CHECK;
++ probLen = prob + LenMid + (posState << kLenNumMidBits);
++ offset = kLenNumLowSymbols;
++ limit = 1 << kLenNumMidBits;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ probLen = prob + LenHigh;
++ offset = kLenNumLowSymbols + kLenNumMidSymbols;
++ limit = 1 << kLenNumHighBits;
++ }
++ }
++ TREE_DECODE_CHECK(probLen, limit, len);
++ len += offset;
++ }
++
++ if (state < 4)
++ {
++ unsigned posSlot;
++ prob = probs + PosSlot +
++ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
++ kNumPosSlotBits);
++ TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot);
++ if (posSlot >= kStartPosModelIndex)
++ {
++ int numDirectBits = ((posSlot >> 1) - 1);
++
++ /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */
++
++ if (posSlot < kEndPosModelIndex)
++ {
++ prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1;
++ }
++ else
++ {
++ numDirectBits -= kNumAlignBits;
++ do
++ {
++ NORMALIZE_CHECK
++ range >>= 1;
++ code -= range & (((code - range) >> 31) - 1);
++ /* if (code >= range) code -= range; */
++ }
++ while (--numDirectBits != 0);
++ prob = probs + Align;
++ numDirectBits = kNumAlignBits;
++ }
++ {
++ unsigned i = 1;
++ do
++ {
++ GET_BIT_CHECK(prob + i, i);
++ }
++ while (--numDirectBits != 0);
++ }
++ }
++ }
++ }
++ }
++ NORMALIZE_CHECK;
++ return res;
++}
++
++
++static void LzmaDec_InitRc(CLzmaDec *p, const Byte *data)
++{
++ p->code = ((UInt32)data[1] << 24) | ((UInt32)data[2] << 16) | ((UInt32)data[3] << 8) | ((UInt32)data[4]);
++ p->range = 0xFFFFFFFF;
++ p->needFlush = 0;
++}
++
++void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
++{
++ p->needFlush = 1;
++ p->remainLen = 0;
++ p->tempBufSize = 0;
++
++ if (initDic)
++ {
++ p->processedPos = 0;
++ p->checkDicSize = 0;
++ p->needInitState = 1;
++ }
++ if (initState)
++ p->needInitState = 1;
++}
++
++void LzmaDec_Init(CLzmaDec *p)
++{
++ p->dicPos = 0;
++ LzmaDec_InitDicAndState(p, True, True);
++}
++
++static void LzmaDec_InitStateReal(CLzmaDec *p)
++{
++ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (p->prop.lc + p->prop.lp));
++ UInt32 i;
++ CLzmaProb *probs = p->probs;
++ for (i = 0; i < numProbs; i++)
++ probs[i] = kBitModelTotal >> 1;
++ p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;
++ p->state = 0;
++ p->needInitState = 0;
++}
++
++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
++ ELzmaFinishMode finishMode, ELzmaStatus *status)
++{
++ SizeT inSize = *srcLen;
++ (*srcLen) = 0;
++ LzmaDec_WriteRem(p, dicLimit);
++
++ *status = LZMA_STATUS_NOT_SPECIFIED;
++
++ while (p->remainLen != kMatchSpecLenStart)
++ {
++ int checkEndMarkNow;
++
++ if (p->needFlush != 0)
++ {
++ for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)
++ p->tempBuf[p->tempBufSize++] = *src++;
++ if (p->tempBufSize < RC_INIT_SIZE)
++ {
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (p->tempBuf[0] != 0)
++ return SZ_ERROR_DATA;
++
++ LzmaDec_InitRc(p, p->tempBuf);
++ p->tempBufSize = 0;
++ }
++
++ checkEndMarkNow = 0;
++ if (p->dicPos >= dicLimit)
++ {
++ if (p->remainLen == 0 && p->code == 0)
++ {
++ *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK;
++ return SZ_OK;
++ }
++ if (finishMode == LZMA_FINISH_ANY)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_OK;
++ }
++ if (p->remainLen != 0)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ checkEndMarkNow = 1;
++ }
++
++ if (p->needInitState)
++ LzmaDec_InitStateReal(p);
++
++ if (p->tempBufSize == 0)
++ {
++ SizeT processed;
++ const Byte *bufLimit;
++ if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
++ {
++ int dummyRes = LzmaDec_TryDummy(p, src, inSize);
++ if (dummyRes == DUMMY_ERROR)
++ {
++ memcpy(p->tempBuf, src, inSize);
++ p->tempBufSize = (unsigned)inSize;
++ (*srcLen) += inSize;
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ bufLimit = src;
++ }
++ else
++ bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX;
++ p->buf = src;
++ if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0)
++ return SZ_ERROR_DATA;
++ processed = (SizeT)(p->buf - src);
++ (*srcLen) += processed;
++ src += processed;
++ inSize -= processed;
++ }
++ else
++ {
++ unsigned rem = p->tempBufSize, lookAhead = 0;
++ while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize)
++ p->tempBuf[rem++] = src[lookAhead++];
++ p->tempBufSize = rem;
++ if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
++ {
++ int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, rem);
++ if (dummyRes == DUMMY_ERROR)
++ {
++ (*srcLen) += lookAhead;
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ }
++ p->buf = p->tempBuf;
++ if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0)
++ return SZ_ERROR_DATA;
++ lookAhead -= (rem - (unsigned)(p->buf - p->tempBuf));
++ (*srcLen) += lookAhead;
++ src += lookAhead;
++ inSize -= lookAhead;
++ p->tempBufSize = 0;
++ }
++ }
++ if (p->code == 0)
++ *status = LZMA_STATUS_FINISHED_WITH_MARK;
++ return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA;
++}
++
++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
++{
++ SizeT outSize = *destLen;
++ SizeT inSize = *srcLen;
++ *srcLen = *destLen = 0;
++ for (;;)
++ {
++ SizeT inSizeCur = inSize, outSizeCur, dicPos;
++ ELzmaFinishMode curFinishMode;
++ SRes res;
++ if (p->dicPos == p->dicBufSize)
++ p->dicPos = 0;
++ dicPos = p->dicPos;
++ if (outSize > p->dicBufSize - dicPos)
++ {
++ outSizeCur = p->dicBufSize;
++ curFinishMode = LZMA_FINISH_ANY;
++ }
++ else
++ {
++ outSizeCur = dicPos + outSize;
++ curFinishMode = finishMode;
++ }
++
++ res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status);
++ src += inSizeCur;
++ inSize -= inSizeCur;
++ *srcLen += inSizeCur;
++ outSizeCur = p->dicPos - dicPos;
++ memcpy(dest, p->dic + dicPos, outSizeCur);
++ dest += outSizeCur;
++ outSize -= outSizeCur;
++ *destLen += outSizeCur;
++ if (res != 0)
++ return res;
++ if (outSizeCur == 0 || outSize == 0)
++ return SZ_OK;
++ }
++}
++
++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->probs);
++ p->probs = 0;
++}
++
++static void LzmaDec_FreeDict(CLzmaDec *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->dic);
++ p->dic = 0;
++}
++
++void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)
++{
++ LzmaDec_FreeProbs(p, alloc);
++ LzmaDec_FreeDict(p, alloc);
++}
++
++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
++{
++ UInt32 dicSize;
++ Byte d;
++
++ if (size < LZMA_PROPS_SIZE)
++ return SZ_ERROR_UNSUPPORTED;
++ else
++ dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24);
++
++ if (dicSize < LZMA_DIC_MIN)
++ dicSize = LZMA_DIC_MIN;
++ p->dicSize = dicSize;
++
++ d = data[0];
++ if (d >= (9 * 5 * 5))
++ return SZ_ERROR_UNSUPPORTED;
++
++ p->lc = d % 9;
++ d /= 9;
++ p->pb = d / 5;
++ p->lp = d % 5;
++
++ return SZ_OK;
++}
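
A worked example of the layout decoded above: the first property byte packs (pb * 5 + lp) * 9 + lc, so the common defaults lc=3, lp=0, pb=2 encode as (2 * 5 + 0) * 9 + 3 = 93 = 0x5D, and bytes 1..4 carry the dictionary size as a little-endian UInt32. A hypothetical inverse helper (not part of the patch):

static Byte lzma_pack_props_byte(unsigned lc, unsigned lp, unsigned pb)
{
    return (Byte)((pb * 5 + lp) * 9 + lc);   /* 0x5D for lc=3, lp=0, pb=2 */
}
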
++
++static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc)
++{
++ UInt32 numProbs = LzmaProps_GetNumProbs(propNew);
++ if (p->probs == 0 || numProbs != p->numProbs)
++ {
++ LzmaDec_FreeProbs(p, alloc);
++ p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb));
++ p->numProbs = numProbs;
++ if (p->probs == 0)
++ return SZ_ERROR_MEM;
++ }
++ return SZ_OK;
++}
++
++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++{
++ CLzmaProps propNew;
++ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
++ p->prop = propNew;
++ return SZ_OK;
++}
++
++SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++{
++ CLzmaProps propNew;
++ SizeT dicBufSize;
++ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
++ dicBufSize = propNew.dicSize;
++ if (p->dic == 0 || dicBufSize != p->dicBufSize)
++ {
++ LzmaDec_FreeDict(p, alloc);
++ p->dic = (Byte *)alloc->Alloc(alloc, dicBufSize);
++ if (p->dic == 0)
++ {
++ LzmaDec_FreeProbs(p, alloc);
++ return SZ_ERROR_MEM;
++ }
++ }
++ p->dicBufSize = dicBufSize;
++ p->prop = propNew;
++ return SZ_OK;
++}
++
++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
++ ELzmaStatus *status, ISzAlloc *alloc)
++{
++ CLzmaDec p;
++ SRes res;
++ SizeT inSize = *srcLen;
++ SizeT outSize = *destLen;
++ *srcLen = *destLen = 0;
++ if (inSize < RC_INIT_SIZE)
++ return SZ_ERROR_INPUT_EOF;
++
++ LzmaDec_Construct(&p);
++ res = LzmaDec_AllocateProbs(&p, propData, propSize, alloc);
++ if (res != 0)
++ return res;
++ p.dic = dest;
++ p.dicBufSize = outSize;
++
++ LzmaDec_Init(&p);
++
++ *srcLen = inSize;
++ res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
++
++ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
++ res = SZ_ERROR_INPUT_EOF;
++
++ (*destLen) = p.dicPos;
++ LzmaDec_FreeProbs(&p, alloc);
++ return res;
++}
+--- /dev/null
++++ b/lib/lzma/LzmaEnc.c
+@@ -0,0 +1,2271 @@
++/* LzmaEnc.c -- LZMA Encoder
++2009-11-24 : Igor Pavlov : Public domain */
++
++#include <string.h>
++
++/* #define SHOW_STAT */
++/* #define SHOW_STAT2 */
++
++#if defined(SHOW_STAT) || defined(SHOW_STAT2)
++#include <stdio.h>
++#endif
++
++#include "LzmaEnc.h"
++
++/* disable MT */
++#define _7ZIP_ST
++
++#include "LzFind.h"
++#ifndef _7ZIP_ST
++#include "LzFindMt.h"
++#endif
++
++#ifdef SHOW_STAT
++static int ttt = 0;
++#endif
++
++#define kBlockSizeMax ((1 << LZMA_NUM_BLOCK_SIZE_BITS) - 1)
++
++#define kBlockSize (9 << 10)
++#define kUnpackBlockSize (1 << 18)
++#define kMatchArraySize (1 << 21)
++#define kMatchRecordMaxSize ((LZMA_MATCH_LEN_MAX * 2 + 3) * LZMA_MATCH_LEN_MAX)
++
++#define kNumMaxDirectBits (31)
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++#define kProbInitValue (kBitModelTotal >> 1)
++
++#define kNumMoveReducingBits 4
++#define kNumBitPriceShiftBits 4
++#define kBitPrice (1 << kNumBitPriceShiftBits)
++
++void LzmaEncProps_Init(CLzmaEncProps *p)
++{
++ p->level = 5;
++ p->dictSize = p->mc = 0;
++ p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
++ p->writeEndMark = 0;
++}
++
++void LzmaEncProps_Normalize(CLzmaEncProps *p)
++{
++ int level = p->level;
++ if (level < 0) level = 5;
++ p->level = level;
++ if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level == 6 ? (1 << 25) : (1 << 26)));
++ if (p->lc < 0) p->lc = 3;
++ if (p->lp < 0) p->lp = 0;
++ if (p->pb < 0) p->pb = 2;
++ if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
++ if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
++ if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
++ if (p->numHashBytes < 0) p->numHashBytes = 4;
++ if (p->mc == 0) p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1);
++ if (p->numThreads < 0)
++ p->numThreads =
++ #ifndef _7ZIP_ST
++ ((p->btMode && p->algo) ? 2 : 1);
++ #else
++ 1;
++ #endif
++}
++
++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
++{
++ CLzmaEncProps props = *props2;
++ LzmaEncProps_Normalize(&props);
++ return props.dictSize;
++}
++
++/* #define LZMA_LOG_BSR */
++/* Define it for Intel's CPU */
++
++
++#ifdef LZMA_LOG_BSR
++
++#define kDicLogSizeMaxCompress 30
++
++#define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); }
++
++UInt32 GetPosSlot1(UInt32 pos)
++{
++ UInt32 res;
++ BSR2_RET(pos, res);
++ return res;
++}
++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
++#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
++
++#else
++
++#define kNumLogBits (9 + (int)sizeof(size_t) / 2)
++#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
++
++void LzmaEnc_FastPosInit(Byte *g_FastPos)
++{
++ int c = 2, slotFast;
++ g_FastPos[0] = 0;
++ g_FastPos[1] = 1;
++
++ for (slotFast = 2; slotFast < kNumLogBits * 2; slotFast++)
++ {
++ UInt32 k = (1 << ((slotFast >> 1) - 1));
++ UInt32 j;
++ for (j = 0; j < k; j++, c++)
++ g_FastPos[c] = (Byte)slotFast;
++ }
++}
++
++#define BSR2_RET(pos, res) { UInt32 i = 6 + ((kNumLogBits - 1) & \
++ (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
++ res = p->g_FastPos[pos >> i] + (i * 2); }
++/*
++#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
++ p->g_FastPos[pos >> 6] + 12 : \
++ p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
++*/
++
++#define GetPosSlot1(pos) p->g_FastPos[pos]
++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
++#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos]; else BSR2_RET(pos, res); }
++
++#endif
++
++
++#define LZMA_NUM_REPS 4
++
++typedef unsigned CState;
++
++typedef struct
++{
++ UInt32 price;
++
++ CState state;
++ int prev1IsChar;
++ int prev2;
++
++ UInt32 posPrev2;
++ UInt32 backPrev2;
++
++ UInt32 posPrev;
++ UInt32 backPrev;
++ UInt32 backs[LZMA_NUM_REPS];
++} COptimal;
++
++#define kNumOpts (1 << 12)
++
++#define kNumLenToPosStates 4
++#define kNumPosSlotBits 6
++#define kDicLogSizeMin 0
++#define kDicLogSizeMax 32
++#define kDistTableSizeMax (kDicLogSizeMax * 2)
++
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++#define kAlignMask (kAlignTableSize - 1)
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumPosModels (kEndPosModelIndex - kStartPosModelIndex)
++
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#ifdef _LZMA_PROB32
++#define CLzmaProb UInt32
++#else
++#define CLzmaProb UInt16
++#endif
++
++#define LZMA_PB_MAX 4
++#define LZMA_LC_MAX 8
++#define LZMA_LP_MAX 4
++
++#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX)
++
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define kLenNumSymbolsTotal (kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
++
++#define LZMA_MATCH_LEN_MIN 2
++#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1)
++
++#define kNumStates 12
++
++typedef struct
++{
++ CLzmaProb choice;
++ CLzmaProb choice2;
++ CLzmaProb low[LZMA_NUM_PB_STATES_MAX << kLenNumLowBits];
++ CLzmaProb mid[LZMA_NUM_PB_STATES_MAX << kLenNumMidBits];
++ CLzmaProb high[kLenNumHighSymbols];
++} CLenEnc;
++
++typedef struct
++{
++ CLenEnc p;
++ UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal];
++ UInt32 tableSize;
++ UInt32 counters[LZMA_NUM_PB_STATES_MAX];
++} CLenPriceEnc;
++
++typedef struct
++{
++ UInt32 range;
++ Byte cache;
++ UInt64 low;
++ UInt64 cacheSize;
++ Byte *buf;
++ Byte *bufLim;
++ Byte *bufBase;
++ ISeqOutStream *outStream;
++ UInt64 processed;
++ SRes res;
++} CRangeEnc;
++
++typedef struct
++{
++ CLzmaProb *litProbs;
++
++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
++ CLzmaProb isRep[kNumStates];
++ CLzmaProb isRepG0[kNumStates];
++ CLzmaProb isRepG1[kNumStates];
++ CLzmaProb isRepG2[kNumStates];
++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
++
++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
++ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
++
++ CLenPriceEnc lenEnc;
++ CLenPriceEnc repLenEnc;
++
++ UInt32 reps[LZMA_NUM_REPS];
++ UInt32 state;
++} CSaveState;
++
++typedef struct
++{
++ IMatchFinder matchFinder;
++ void *matchFinderObj;
++
++ #ifndef _7ZIP_ST
++ Bool mtMode;
++ CMatchFinderMt matchFinderMt;
++ #endif
++
++ CMatchFinder matchFinderBase;
++
++ #ifndef _7ZIP_ST
++ Byte pad[128];
++ #endif
++
++ UInt32 optimumEndIndex;
++ UInt32 optimumCurrentIndex;
++
++ UInt32 longestMatchLength;
++ UInt32 numPairs;
++ UInt32 numAvail;
++ COptimal opt[kNumOpts];
++
++ #ifndef LZMA_LOG_BSR
++ Byte g_FastPos[1 << kNumLogBits];
++ #endif
++
++ UInt32 ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
++ UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1];
++ UInt32 numFastBytes;
++ UInt32 additionalOffset;
++ UInt32 reps[LZMA_NUM_REPS];
++ UInt32 state;
++
++ UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
++ UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
++ UInt32 alignPrices[kAlignTableSize];
++ UInt32 alignPriceCount;
++
++ UInt32 distTableSize;
++
++ unsigned lc, lp, pb;
++ unsigned lpMask, pbMask;
++
++ CLzmaProb *litProbs;
++
++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
++ CLzmaProb isRep[kNumStates];
++ CLzmaProb isRepG0[kNumStates];
++ CLzmaProb isRepG1[kNumStates];
++ CLzmaProb isRepG2[kNumStates];
++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
++
++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
++ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
++
++ CLenPriceEnc lenEnc;
++ CLenPriceEnc repLenEnc;
++
++ unsigned lclp;
++
++ Bool fastMode;
++
++ CRangeEnc rc;
++
++ Bool writeEndMark;
++ UInt64 nowPos64;
++ UInt32 matchPriceCount;
++ Bool finished;
++ Bool multiThread;
++
++ SRes result;
++ UInt32 dictSize;
++ UInt32 matchFinderCycles;
++
++ int needInit;
++
++ CSaveState saveState;
++} CLzmaEnc;
++
++void LzmaEnc_SaveState(CLzmaEncHandle pp)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ CSaveState *dest = &p->saveState;
++ int i;
++ dest->lenEnc = p->lenEnc;
++ dest->repLenEnc = p->repLenEnc;
++ dest->state = p->state;
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
++ }
++ for (i = 0; i < kNumLenToPosStates; i++)
++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
++ memcpy(dest->reps, p->reps, sizeof(p->reps));
++ memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb));
++}
++
++void LzmaEnc_RestoreState(CLzmaEncHandle pp)
++{
++ CLzmaEnc *dest = (CLzmaEnc *)pp;
++ const CSaveState *p = &dest->saveState;
++ int i;
++ dest->lenEnc = p->lenEnc;
++ dest->repLenEnc = p->repLenEnc;
++ dest->state = p->state;
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
++ }
++ for (i = 0; i < kNumLenToPosStates; i++)
++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
++ memcpy(dest->reps, p->reps, sizeof(p->reps));
++ memcpy(dest->litProbs, p->litProbs, (0x300 << dest->lclp) * sizeof(CLzmaProb));
++}
++
++SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ CLzmaEncProps props = *props2;
++ LzmaEncProps_Normalize(&props);
++
++ if (props.lc > LZMA_LC_MAX || props.lp > LZMA_LP_MAX || props.pb > LZMA_PB_MAX ||
++ props.dictSize > (1 << kDicLogSizeMaxCompress) || props.dictSize > (1 << 30))
++ return SZ_ERROR_PARAM;
++ p->dictSize = props.dictSize;
++ p->matchFinderCycles = props.mc;
++ {
++ unsigned fb = props.fb;
++ if (fb < 5)
++ fb = 5;
++ if (fb > LZMA_MATCH_LEN_MAX)
++ fb = LZMA_MATCH_LEN_MAX;
++ p->numFastBytes = fb;
++ }
++ p->lc = props.lc;
++ p->lp = props.lp;
++ p->pb = props.pb;
++ p->fastMode = (props.algo == 0);
++ p->matchFinderBase.btMode = props.btMode;
++ {
++ UInt32 numHashBytes = 4;
++ if (props.btMode)
++ {
++ if (props.numHashBytes < 2)
++ numHashBytes = 2;
++ else if (props.numHashBytes < 4)
++ numHashBytes = props.numHashBytes;
++ }
++ p->matchFinderBase.numHashBytes = numHashBytes;
++ }
++
++ p->matchFinderBase.cutValue = props.mc;
++
++ p->writeEndMark = props.writeEndMark;
++
++ #ifndef _7ZIP_ST
++ /*
++ if (newMultiThread != _multiThread)
++ {
++ ReleaseMatchFinder();
++ _multiThread = newMultiThread;
++ }
++ */
++ p->multiThread = (props.numThreads > 1);
++ #endif
++
++ return SZ_OK;
++}
++
++static const int kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5};
++static const int kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
++static const int kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
++static const int kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
++
++#define IsCharState(s) ((s) < 7)
++
++#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1)
++
++#define kInfinityPrice (1 << 30)
++
++static void RangeEnc_Construct(CRangeEnc *p)
++{
++ p->outStream = 0;
++ p->bufBase = 0;
++}
++
++#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize)
++
++#define RC_BUF_SIZE (1 << 16)
++static int RangeEnc_Alloc(CRangeEnc *p, ISzAlloc *alloc)
++{
++ if (p->bufBase == 0)
++ {
++ p->bufBase = (Byte *)alloc->Alloc(alloc, RC_BUF_SIZE);
++ if (p->bufBase == 0)
++ return 0;
++ p->bufLim = p->bufBase + RC_BUF_SIZE;
++ }
++ return 1;
++}
++
++static void RangeEnc_Free(CRangeEnc *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->bufBase);
++ p->bufBase = 0;
++}
++
++static void RangeEnc_Init(CRangeEnc *p)
++{
++ /* Stream.Init(); */
++ p->low = 0;
++ p->range = 0xFFFFFFFF;
++ p->cacheSize = 1;
++ p->cache = 0;
++
++ p->buf = p->bufBase;
++
++ p->processed = 0;
++ p->res = SZ_OK;
++}
++
++static void RangeEnc_FlushStream(CRangeEnc *p)
++{
++ size_t num;
++ if (p->res != SZ_OK)
++ return;
++ num = p->buf - p->bufBase;
++ if (num != p->outStream->Write(p->outStream, p->bufBase, num))
++ p->res = SZ_ERROR_WRITE;
++ p->processed += num;
++ p->buf = p->bufBase;
++}
++
++static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p)
++{
++ if ((UInt32)p->low < (UInt32)0xFF000000 || (int)(p->low >> 32) != 0)
++ {
++ Byte temp = p->cache;
++ do
++ {
++ Byte *buf = p->buf;
++ *buf++ = (Byte)(temp + (Byte)(p->low >> 32));
++ p->buf = buf;
++ if (buf == p->bufLim)
++ RangeEnc_FlushStream(p);
++ temp = 0xFF;
++ }
++ while (--p->cacheSize != 0);
++ p->cache = (Byte)((UInt32)p->low >> 24);
++ }
++ p->cacheSize++;
++ p->low = (UInt32)p->low << 8;
++}
++
++static void RangeEnc_FlushData(CRangeEnc *p)
++{
++ int i;
++ for (i = 0; i < 5; i++)
++ RangeEnc_ShiftLow(p);
++}
++
++static void RangeEnc_EncodeDirectBits(CRangeEnc *p, UInt32 value, int numBits)
++{
++ do
++ {
++ p->range >>= 1;
++ p->low += p->range & (0 - ((value >> --numBits) & 1));
++ if (p->range < kTopValue)
++ {
++ p->range <<= 8;
++ RangeEnc_ShiftLow(p);
++ }
++ }
++ while (numBits != 0);
++}
++
++static void RangeEnc_EncodeBit(CRangeEnc *p, CLzmaProb *prob, UInt32 symbol)
++{
++ UInt32 ttt = *prob;
++ UInt32 newBound = (p->range >> kNumBitModelTotalBits) * ttt;
++ if (symbol == 0)
++ {
++ p->range = newBound;
++ ttt += (kBitModelTotal - ttt) >> kNumMoveBits;
++ }
++ else
++ {
++ p->low += newBound;
++ p->range -= newBound;
++ ttt -= ttt >> kNumMoveBits;
++ }
++ *prob = (CLzmaProb)ttt;
++ if (p->range < kTopValue)
++ {
++ p->range <<= 8;
++ RangeEnc_ShiftLow(p);
++ }
++}
++
++static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol)
++{
++ symbol |= 0x100;
++ do
++ {
++ RangeEnc_EncodeBit(p, probs + (symbol >> 8), (symbol >> 7) & 1);
++ symbol <<= 1;
++ }
++ while (symbol < 0x10000);
++}
++
++static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol, UInt32 matchByte)
++{
++ UInt32 offs = 0x100;
++ symbol |= 0x100;
++ do
++ {
++ matchByte <<= 1;
++ RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (symbol >> 8)), (symbol >> 7) & 1);
++ symbol <<= 1;
++ offs &= ~(matchByte ^ symbol);
++ }
++ while (symbol < 0x10000);
++}
++
++void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
++{
++ UInt32 i;
++ for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits))
++ {
++ const int kCyclesBits = kNumBitPriceShiftBits;
++ UInt32 w = i;
++ UInt32 bitCount = 0;
++ int j;
++ for (j = 0; j < kCyclesBits; j++)
++ {
++ w = w * w;
++ bitCount <<= 1;
++ while (w >= ((UInt32)1 << 16))
++ {
++ w >>= 1;
++ bitCount++;
++ }
++ }
++ ProbPrices[i >> kNumMoveReducingBits] = ((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount);
++ }
++}
++
++
++#define GET_PRICE(prob, symbol) \
++ p->ProbPrices[((prob) ^ (((-(int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
++
++#define GET_PRICEa(prob, symbol) \
++ ProbPrices[((prob) ^ ((-((int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
++
++#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
++#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
++
++#define GET_PRICE_0a(prob) ProbPrices[(prob) >> kNumMoveReducingBits]
++#define GET_PRICE_1a(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
++
++static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ symbol |= 0x100;
++ do
++ {
++ price += GET_PRICEa(probs[symbol >> 8], (symbol >> 7) & 1);
++ symbol <<= 1;
++ }
++ while (symbol < 0x10000);
++ return price;
++}
++
++static UInt32 LitEnc_GetPriceMatched(const CLzmaProb *probs, UInt32 symbol, UInt32 matchByte, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ UInt32 offs = 0x100;
++ symbol |= 0x100;
++ do
++ {
++ matchByte <<= 1;
++ price += GET_PRICEa(probs[offs + (matchByte & offs) + (symbol >> 8)], (symbol >> 7) & 1);
++ symbol <<= 1;
++ offs &= ~(matchByte ^ symbol);
++ }
++ while (symbol < 0x10000);
++ return price;
++}
++
++
++static void RcTree_Encode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
++{
++ UInt32 m = 1;
++ int i;
++ for (i = numBitLevels; i != 0;)
++ {
++ UInt32 bit;
++ i--;
++ bit = (symbol >> i) & 1;
++ RangeEnc_EncodeBit(rc, probs + m, bit);
++ m = (m << 1) | bit;
++ }
++}
++
++static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
++{
++ UInt32 m = 1;
++ int i;
++ for (i = 0; i < numBitLevels; i++)
++ {
++ UInt32 bit = symbol & 1;
++ RangeEnc_EncodeBit(rc, probs + m, bit);
++ m = (m << 1) | bit;
++ symbol >>= 1;
++ }
++}
++
++static UInt32 RcTree_GetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ symbol |= (1 << numBitLevels);
++ while (symbol != 1)
++ {
++ price += GET_PRICEa(probs[symbol >> 1], symbol & 1);
++ symbol >>= 1;
++ }
++ return price;
++}
++
++static UInt32 RcTree_ReverseGetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ UInt32 m = 1;
++ int i;
++ for (i = numBitLevels; i != 0; i--)
++ {
++ UInt32 bit = symbol & 1;
++ symbol >>= 1;
++ price += GET_PRICEa(probs[m], bit);
++ m = (m << 1) | bit;
++ }
++ return price;
++}
++
++
++static void LenEnc_Init(CLenEnc *p)
++{
++ unsigned i;
++ p->choice = p->choice2 = kProbInitValue;
++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumLowBits); i++)
++ p->low[i] = kProbInitValue;
++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumMidBits); i++)
++ p->mid[i] = kProbInitValue;
++ for (i = 0; i < kLenNumHighSymbols; i++)
++ p->high[i] = kProbInitValue;
++}
++
++static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState)
++{
++ if (symbol < kLenNumLowSymbols)
++ {
++ RangeEnc_EncodeBit(rc, &p->choice, 0);
++ RcTree_Encode(rc, p->low + (posState << kLenNumLowBits), kLenNumLowBits, symbol);
++ }
++ else
++ {
++ RangeEnc_EncodeBit(rc, &p->choice, 1);
++ if (symbol < kLenNumLowSymbols + kLenNumMidSymbols)
++ {
++ RangeEnc_EncodeBit(rc, &p->choice2, 0);
++ RcTree_Encode(rc, p->mid + (posState << kLenNumMidBits), kLenNumMidBits, symbol - kLenNumLowSymbols);
++ }
++ else
++ {
++ RangeEnc_EncodeBit(rc, &p->choice2, 1);
++ RcTree_Encode(rc, p->high, kLenNumHighBits, symbol - kLenNumLowSymbols - kLenNumMidSymbols);
++ }
++ }
++}
++
++static void LenEnc_SetPrices(CLenEnc *p, UInt32 posState, UInt32 numSymbols, UInt32 *prices, UInt32 *ProbPrices)
++{
++ UInt32 a0 = GET_PRICE_0a(p->choice);
++ UInt32 a1 = GET_PRICE_1a(p->choice);
++ UInt32 b0 = a1 + GET_PRICE_0a(p->choice2);
++ UInt32 b1 = a1 + GET_PRICE_1a(p->choice2);
++ UInt32 i = 0;
++ for (i = 0; i < kLenNumLowSymbols; i++)
++ {
++ if (i >= numSymbols)
++ return;
++ prices[i] = a0 + RcTree_GetPrice(p->low + (posState << kLenNumLowBits), kLenNumLowBits, i, ProbPrices);
++ }
++ for (; i < kLenNumLowSymbols + kLenNumMidSymbols; i++)
++ {
++ if (i >= numSymbols)
++ return;
++ prices[i] = b0 + RcTree_GetPrice(p->mid + (posState << kLenNumMidBits), kLenNumMidBits, i - kLenNumLowSymbols, ProbPrices);
++ }
++ for (; i < numSymbols; i++)
++ prices[i] = b1 + RcTree_GetPrice(p->high, kLenNumHighBits, i - kLenNumLowSymbols - kLenNumMidSymbols, ProbPrices);
++}
++
++static void MY_FAST_CALL LenPriceEnc_UpdateTable(CLenPriceEnc *p, UInt32 posState, UInt32 *ProbPrices)
++{
++ LenEnc_SetPrices(&p->p, posState, p->tableSize, p->prices[posState], ProbPrices);
++ p->counters[posState] = p->tableSize;
++}
++
++static void LenPriceEnc_UpdateTables(CLenPriceEnc *p, UInt32 numPosStates, UInt32 *ProbPrices)
++{
++ UInt32 posState;
++ for (posState = 0; posState < numPosStates; posState++)
++ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
++}
++
++static void LenEnc_Encode2(CLenPriceEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState, Bool updatePrice, UInt32 *ProbPrices)
++{
++ LenEnc_Encode(&p->p, rc, symbol, posState);
++ if (updatePrice)
++ if (--p->counters[posState] == 0)
++ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
++}
++
++
++
++
++static void MovePos(CLzmaEnc *p, UInt32 num)
++{
++ #ifdef SHOW_STAT
++ ttt += num;
++ printf("\n MovePos %d", num);
++ #endif
++ if (num != 0)
++ {
++ p->additionalOffset += num;
++ p->matchFinder.Skip(p->matchFinderObj, num);
++ }
++}
++
++static UInt32 ReadMatchDistances(CLzmaEnc *p, UInt32 *numDistancePairsRes)
++{
++ UInt32 lenRes = 0, numPairs;
++ p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
++ numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
++ #ifdef SHOW_STAT
++ printf("\n i = %d numPairs = %d ", ttt, numPairs / 2);
++ ttt++;
++ {
++ UInt32 i;
++ for (i = 0; i < numPairs; i += 2)
++ printf("%2d %6d | ", p->matches[i], p->matches[i + 1]);
++ }
++ #endif
++ if (numPairs > 0)
++ {
++ lenRes = p->matches[numPairs - 2];
++ if (lenRes == p->numFastBytes)
++ {
++ const Byte *pby = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ UInt32 distance = p->matches[numPairs - 1] + 1;
++ UInt32 numAvail = p->numAvail;
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++ {
++ const Byte *pby2 = pby - distance;
++ for (; lenRes < numAvail && pby[lenRes] == pby2[lenRes]; lenRes++);
++ }
++ }
++ }
++ p->additionalOffset++;
++ *numDistancePairsRes = numPairs;
++ return lenRes;
++}
++
++
++#define MakeAsChar(p) (p)->backPrev = (UInt32)(-1); (p)->prev1IsChar = False;
++#define MakeAsShortRep(p) (p)->backPrev = 0; (p)->prev1IsChar = False;
++#define IsShortRep(p) ((p)->backPrev == 0)
++
++static UInt32 GetRepLen1Price(CLzmaEnc *p, UInt32 state, UInt32 posState)
++{
++ return
++ GET_PRICE_0(p->isRepG0[state]) +
++ GET_PRICE_0(p->isRep0Long[state][posState]);
++}
++
++static UInt32 GetPureRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 state, UInt32 posState)
++{
++ UInt32 price;
++ if (repIndex == 0)
++ {
++ price = GET_PRICE_0(p->isRepG0[state]);
++ price += GET_PRICE_1(p->isRep0Long[state][posState]);
++ }
++ else
++ {
++ price = GET_PRICE_1(p->isRepG0[state]);
++ if (repIndex == 1)
++ price += GET_PRICE_0(p->isRepG1[state]);
++ else
++ {
++ price += GET_PRICE_1(p->isRepG1[state]);
++ price += GET_PRICE(p->isRepG2[state], repIndex - 2);
++ }
++ }
++ return price;
++}
++
++static UInt32 GetRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 len, UInt32 state, UInt32 posState)
++{
++ return p->repLenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN] +
++ GetPureRepPrice(p, repIndex, state, posState);
++}
++
++static UInt32 Backward(CLzmaEnc *p, UInt32 *backRes, UInt32 cur)
++{
++ UInt32 posMem = p->opt[cur].posPrev;
++ UInt32 backMem = p->opt[cur].backPrev;
++ p->optimumEndIndex = cur;
++ do
++ {
++ if (p->opt[cur].prev1IsChar)
++ {
++ MakeAsChar(&p->opt[posMem])
++ p->opt[posMem].posPrev = posMem - 1;
++ if (p->opt[cur].prev2)
++ {
++ p->opt[posMem - 1].prev1IsChar = False;
++ p->opt[posMem - 1].posPrev = p->opt[cur].posPrev2;
++ p->opt[posMem - 1].backPrev = p->opt[cur].backPrev2;
++ }
++ }
++ {
++ UInt32 posPrev = posMem;
++ UInt32 backCur = backMem;
++
++ backMem = p->opt[posPrev].backPrev;
++ posMem = p->opt[posPrev].posPrev;
++
++ p->opt[posPrev].backPrev = backCur;
++ p->opt[posPrev].posPrev = cur;
++ cur = posPrev;
++ }
++ }
++ while (cur != 0);
++ *backRes = p->opt[0].backPrev;
++ p->optimumCurrentIndex = p->opt[0].posPrev;
++ return p->optimumCurrentIndex;
++}
++
++#define LIT_PROBS(pos, prevByte) (p->litProbs + ((((pos) & p->lpMask) << p->lc) + ((prevByte) >> (8 - p->lc))) * 0x300)
++
++static UInt32 GetOptimum(CLzmaEnc *p, UInt32 position, UInt32 *backRes)
++{
++ UInt32 numAvail, mainLen, numPairs, repMaxIndex, i, posState, lenEnd, len, cur;
++ UInt32 matchPrice, repMatchPrice, normalMatchPrice;
++ UInt32 reps[LZMA_NUM_REPS], repLens[LZMA_NUM_REPS];
++ UInt32 *matches;
++ const Byte *data;
++ Byte curByte, matchByte;
++ if (p->optimumEndIndex != p->optimumCurrentIndex)
++ {
++ const COptimal *opt = &p->opt[p->optimumCurrentIndex];
++ UInt32 lenRes = opt->posPrev - p->optimumCurrentIndex;
++ *backRes = opt->backPrev;
++ p->optimumCurrentIndex = opt->posPrev;
++ return lenRes;
++ }
++ p->optimumCurrentIndex = p->optimumEndIndex = 0;
++
++ if (p->additionalOffset == 0)
++ mainLen = ReadMatchDistances(p, &numPairs);
++ else
++ {
++ mainLen = p->longestMatchLength;
++ numPairs = p->numPairs;
++ }
++
++ numAvail = p->numAvail;
++ if (numAvail < 2)
++ {
++ *backRes = (UInt32)(-1);
++ return 1;
++ }
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ repMaxIndex = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 lenTest;
++ const Byte *data2;
++ reps[i] = p->reps[i];
++ data2 = data - (reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ {
++ repLens[i] = 0;
++ continue;
++ }
++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
++ repLens[i] = lenTest;
++ if (lenTest > repLens[repMaxIndex])
++ repMaxIndex = i;
++ }
++ if (repLens[repMaxIndex] >= p->numFastBytes)
++ {
++ UInt32 lenRes;
++ *backRes = repMaxIndex;
++ lenRes = repLens[repMaxIndex];
++ MovePos(p, lenRes - 1);
++ return lenRes;
++ }
++
++ matches = p->matches;
++ if (mainLen >= p->numFastBytes)
++ {
++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 1);
++ return mainLen;
++ }
++ curByte = *data;
++ matchByte = *(data - (reps[0] + 1));
++
++ if (mainLen < 2 && curByte != matchByte && repLens[repMaxIndex] < 2)
++ {
++ *backRes = (UInt32)-1;
++ return 1;
++ }
++
++ p->opt[0].state = (CState)p->state;
++
++ posState = (position & p->pbMask);
++
++ {
++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
++ p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) +
++ (!IsCharState(p->state) ?
++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
++ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
++ }
++
++ MakeAsChar(&p->opt[1]);
++
++ matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
++
++ if (matchByte == curByte)
++ {
++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, p->state, posState);
++ if (shortRepPrice < p->opt[1].price)
++ {
++ p->opt[1].price = shortRepPrice;
++ MakeAsShortRep(&p->opt[1]);
++ }
++ }
++ lenEnd = ((mainLen >= repLens[repMaxIndex]) ? mainLen : repLens[repMaxIndex]);
++
++ if (lenEnd < 2)
++ {
++ *backRes = p->opt[1].backPrev;
++ return 1;
++ }
++
++ p->opt[1].posPrev = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ p->opt[0].backs[i] = reps[i];
++
++ len = lenEnd;
++ do
++ p->opt[len--].price = kInfinityPrice;
++ while (len >= 2);
++
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 repLen = repLens[i];
++ UInt32 price;
++ if (repLen < 2)
++ continue;
++ price = repMatchPrice + GetPureRepPrice(p, i, p->state, posState);
++ do
++ {
++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][repLen - 2];
++ COptimal *opt = &p->opt[repLen];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = 0;
++ opt->backPrev = i;
++ opt->prev1IsChar = False;
++ }
++ }
++ while (--repLen >= 2);
++ }
++
++ normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]);
++
++ len = ((repLens[0] >= 2) ? repLens[0] + 1 : 2);
++ if (len <= mainLen)
++ {
++ UInt32 offs = 0;
++ while (len > matches[offs])
++ offs += 2;
++ for (; ; len++)
++ {
++ COptimal *opt;
++ UInt32 distance = matches[offs + 1];
++
++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN];
++ UInt32 lenToPosState = GetLenToPosState(len);
++ if (distance < kNumFullDistances)
++ curAndLenPrice += p->distancesPrices[lenToPosState][distance];
++ else
++ {
++ UInt32 slot;
++ GetPosSlot2(distance, slot);
++ curAndLenPrice += p->alignPrices[distance & kAlignMask] + p->posSlotPrices[lenToPosState][slot];
++ }
++ opt = &p->opt[len];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = 0;
++ opt->backPrev = distance + LZMA_NUM_REPS;
++ opt->prev1IsChar = False;
++ }
++ if (len == matches[offs])
++ {
++ offs += 2;
++ if (offs == numPairs)
++ break;
++ }
++ }
++ }
++
++ cur = 0;
++
++ #ifdef SHOW_STAT2
++ if (position >= 0)
++ {
++ unsigned i;
++ printf("\n pos = %4X", position);
++ for (i = cur; i <= lenEnd; i++)
++ printf("\nprice[%4X] = %d", position - cur + i, p->opt[i].price);
++ }
++ #endif
++
++ for (;;)
++ {
++ UInt32 numAvailFull, newLen, numPairs, posPrev, state, posState, startLen;
++ UInt32 curPrice, curAnd1Price, matchPrice, repMatchPrice;
++ Bool nextIsChar;
++ Byte curByte, matchByte;
++ const Byte *data;
++ COptimal *curOpt;
++ COptimal *nextOpt;
++
++ cur++;
++ if (cur == lenEnd)
++ return Backward(p, backRes, cur);
++
++ newLen = ReadMatchDistances(p, &numPairs);
++ if (newLen >= p->numFastBytes)
++ {
++ p->numPairs = numPairs;
++ p->longestMatchLength = newLen;
++ return Backward(p, backRes, cur);
++ }
++ position++;
++ curOpt = &p->opt[cur];
++ posPrev = curOpt->posPrev;
++ if (curOpt->prev1IsChar)
++ {
++ posPrev--;
++ if (curOpt->prev2)
++ {
++ state = p->opt[curOpt->posPrev2].state;
++ if (curOpt->backPrev2 < LZMA_NUM_REPS)
++ state = kRepNextStates[state];
++ else
++ state = kMatchNextStates[state];
++ }
++ else
++ state = p->opt[posPrev].state;
++ state = kLiteralNextStates[state];
++ }
++ else
++ state = p->opt[posPrev].state;
++ if (posPrev == cur - 1)
++ {
++ if (IsShortRep(curOpt))
++ state = kShortRepNextStates[state];
++ else
++ state = kLiteralNextStates[state];
++ }
++ else
++ {
++ UInt32 pos;
++ const COptimal *prevOpt;
++ if (curOpt->prev1IsChar && curOpt->prev2)
++ {
++ posPrev = curOpt->posPrev2;
++ pos = curOpt->backPrev2;
++ state = kRepNextStates[state];
++ }
++ else
++ {
++ pos = curOpt->backPrev;
++ if (pos < LZMA_NUM_REPS)
++ state = kRepNextStates[state];
++ else
++ state = kMatchNextStates[state];
++ }
++ prevOpt = &p->opt[posPrev];
++ if (pos < LZMA_NUM_REPS)
++ {
++ UInt32 i;
++ reps[0] = prevOpt->backs[pos];
++ for (i = 1; i <= pos; i++)
++ reps[i] = prevOpt->backs[i - 1];
++ for (; i < LZMA_NUM_REPS; i++)
++ reps[i] = prevOpt->backs[i];
++ }
++ else
++ {
++ UInt32 i;
++ reps[0] = (pos - LZMA_NUM_REPS);
++ for (i = 1; i < LZMA_NUM_REPS; i++)
++ reps[i] = prevOpt->backs[i - 1];
++ }
++ }
++ curOpt->state = (CState)state;
++
++ curOpt->backs[0] = reps[0];
++ curOpt->backs[1] = reps[1];
++ curOpt->backs[2] = reps[2];
++ curOpt->backs[3] = reps[3];
++
++ curPrice = curOpt->price;
++ nextIsChar = False;
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ curByte = *data;
++ matchByte = *(data - (reps[0] + 1));
++
++ posState = (position & p->pbMask);
++
++ curAnd1Price = curPrice + GET_PRICE_0(p->isMatch[state][posState]);
++ {
++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
++ curAnd1Price +=
++ (!IsCharState(state) ?
++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
++ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
++ }
++
++ nextOpt = &p->opt[cur + 1];
++
++ if (curAnd1Price < nextOpt->price)
++ {
++ nextOpt->price = curAnd1Price;
++ nextOpt->posPrev = cur;
++ MakeAsChar(nextOpt);
++ nextIsChar = True;
++ }
++
++ matchPrice = curPrice + GET_PRICE_1(p->isMatch[state][posState]);
++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]);
++
++ if (matchByte == curByte && !(nextOpt->posPrev < cur && nextOpt->backPrev == 0))
++ {
++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, state, posState);
++ if (shortRepPrice <= nextOpt->price)
++ {
++ nextOpt->price = shortRepPrice;
++ nextOpt->posPrev = cur;
++ MakeAsShortRep(nextOpt);
++ nextIsChar = True;
++ }
++ }
++ numAvailFull = p->numAvail;
++ {
++ UInt32 temp = kNumOpts - 1 - cur;
++ if (temp < numAvailFull)
++ numAvailFull = temp;
++ }
++
++ if (numAvailFull < 2)
++ continue;
++ numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes);
++
++ if (!nextIsChar && matchByte != curByte) /* speed optimization */
++ {
++ /* try Literal + rep0 */
++ UInt32 temp;
++ UInt32 lenTest2;
++ const Byte *data2 = data - (reps[0] + 1);
++ UInt32 limit = p->numFastBytes + 1;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++
++ for (temp = 1; temp < limit && data[temp] == data2[temp]; temp++);
++ lenTest2 = temp - 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kLiteralNextStates[state];
++ UInt32 posStateNext = (position + 1) & p->pbMask;
++ UInt32 nextRepMatchPrice = curAnd1Price +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ UInt32 offset = cur + 1 + lenTest2;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = False;
++ }
++ }
++ }
++ }
++
++ startLen = 2; /* speed optimization */
++ {
++ UInt32 repIndex;
++ for (repIndex = 0; repIndex < LZMA_NUM_REPS; repIndex++)
++ {
++ UInt32 lenTest;
++ UInt32 lenTestTemp;
++ UInt32 price;
++ const Byte *data2 = data - (reps[repIndex] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
++ while (lenEnd < cur + lenTest)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ lenTestTemp = lenTest;
++ price = repMatchPrice + GetPureRepPrice(p, repIndex, state, posState);
++ do
++ {
++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][lenTest - 2];
++ COptimal *opt = &p->opt[cur + lenTest];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur;
++ opt->backPrev = repIndex;
++ opt->prev1IsChar = False;
++ }
++ }
++ while (--lenTest >= 2);
++ lenTest = lenTestTemp;
++
++ if (repIndex == 0)
++ startLen = lenTest + 1;
++
++ /* if (_maxMode) */
++ {
++ UInt32 lenTest2 = lenTest + 1;
++ UInt32 limit = lenTest2 + p->numFastBytes;
++ UInt32 nextRepMatchPrice;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
++ lenTest2 -= lenTest + 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kRepNextStates[state];
++ UInt32 posStateNext = (position + lenTest) & p->pbMask;
++ UInt32 curAndLenCharPrice =
++ price + p->repLenEnc.prices[posState][lenTest - 2] +
++ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
++ data[lenTest], data2[lenTest], p->ProbPrices);
++ state2 = kLiteralNextStates[state2];
++ posStateNext = (position + lenTest + 1) & p->pbMask;
++ nextRepMatchPrice = curAndLenCharPrice +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ UInt32 offset = cur + lenTest + 1 + lenTest2;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + lenTest + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = True;
++ opt->posPrev2 = cur;
++ opt->backPrev2 = repIndex;
++ }
++ }
++ }
++ }
++ }
++ }
++ /* for (UInt32 lenTest = 2; lenTest <= newLen; lenTest++) */
++ if (newLen > numAvail)
++ {
++ newLen = numAvail;
++ for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2);
++ matches[numPairs] = newLen;
++ numPairs += 2;
++ }
++ if (newLen >= startLen)
++ {
++ UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]);
++ UInt32 offs, curBack, posSlot;
++ UInt32 lenTest;
++ while (lenEnd < cur + newLen)
++ p->opt[++lenEnd].price = kInfinityPrice;
++
++ offs = 0;
++ while (startLen > matches[offs])
++ offs += 2;
++ curBack = matches[offs + 1];
++ GetPosSlot2(curBack, posSlot);
++ for (lenTest = /*2*/ startLen; ; lenTest++)
++ {
++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][lenTest - LZMA_MATCH_LEN_MIN];
++ UInt32 lenToPosState = GetLenToPosState(lenTest);
++ COptimal *opt;
++ if (curBack < kNumFullDistances)
++ curAndLenPrice += p->distancesPrices[lenToPosState][curBack];
++ else
++ curAndLenPrice += p->posSlotPrices[lenToPosState][posSlot] + p->alignPrices[curBack & kAlignMask];
++
++ opt = &p->opt[cur + lenTest];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur;
++ opt->backPrev = curBack + LZMA_NUM_REPS;
++ opt->prev1IsChar = False;
++ }
++
++ if (/*_maxMode && */lenTest == matches[offs])
++ {
++ /* Try Match + Literal + Rep0 */
++ const Byte *data2 = data - (curBack + 1);
++ UInt32 lenTest2 = lenTest + 1;
++ UInt32 limit = lenTest2 + p->numFastBytes;
++ UInt32 nextRepMatchPrice;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
++ lenTest2 -= lenTest + 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kMatchNextStates[state];
++ UInt32 posStateNext = (position + lenTest) & p->pbMask;
++ UInt32 curAndLenCharPrice = curAndLenPrice +
++ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
++ data[lenTest], data2[lenTest], p->ProbPrices);
++ state2 = kLiteralNextStates[state2];
++ posStateNext = (posStateNext + 1) & p->pbMask;
++ nextRepMatchPrice = curAndLenCharPrice +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 offset = cur + lenTest + 1 + lenTest2;
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + lenTest + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = True;
++ opt->posPrev2 = cur;
++ opt->backPrev2 = curBack + LZMA_NUM_REPS;
++ }
++ }
++ }
++ offs += 2;
++ if (offs == numPairs)
++ break;
++ curBack = matches[offs + 1];
++ if (curBack >= kNumFullDistances)
++ GetPosSlot2(curBack, posSlot);
++ }
++ }
++ }
++ }
++}
++
++#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))
++
++static UInt32 GetOptimumFast(CLzmaEnc *p, UInt32 *backRes)
++{
++ UInt32 numAvail, mainLen, mainDist, numPairs, repIndex, repLen, i;
++ const Byte *data;
++ const UInt32 *matches;
++
++ if (p->additionalOffset == 0)
++ mainLen = ReadMatchDistances(p, &numPairs);
++ else
++ {
++ mainLen = p->longestMatchLength;
++ numPairs = p->numPairs;
++ }
++
++ numAvail = p->numAvail;
++ *backRes = (UInt32)-1;
++ if (numAvail < 2)
++ return 1;
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++
++ repLen = repIndex = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 len;
++ const Byte *data2 = data - (p->reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ for (len = 2; len < numAvail && data[len] == data2[len]; len++);
++ if (len >= p->numFastBytes)
++ {
++ *backRes = i;
++ MovePos(p, len - 1);
++ return len;
++ }
++ if (len > repLen)
++ {
++ repIndex = i;
++ repLen = len;
++ }
++ }
++
++ matches = p->matches;
++ if (mainLen >= p->numFastBytes)
++ {
++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 1);
++ return mainLen;
++ }
++
++ mainDist = 0; /* for GCC */
++ if (mainLen >= 2)
++ {
++ mainDist = matches[numPairs - 1];
++ while (numPairs > 2 && mainLen == matches[numPairs - 4] + 1)
++ {
++ if (!ChangePair(matches[numPairs - 3], mainDist))
++ break;
++ numPairs -= 2;
++ mainLen = matches[numPairs - 2];
++ mainDist = matches[numPairs - 1];
++ }
++ if (mainLen == 2 && mainDist >= 0x80)
++ mainLen = 1;
++ }
++
++ if (repLen >= 2 && (
++ (repLen + 1 >= mainLen) ||
++ (repLen + 2 >= mainLen && mainDist >= (1 << 9)) ||
++ (repLen + 3 >= mainLen && mainDist >= (1 << 15))))
++ {
++ *backRes = repIndex;
++ MovePos(p, repLen - 1);
++ return repLen;
++ }
++
++ if (mainLen < 2 || numAvail <= 2)
++ return 1;
++
++ p->longestMatchLength = ReadMatchDistances(p, &p->numPairs);
++ if (p->longestMatchLength >= 2)
++ {
++ UInt32 newDistance = matches[p->numPairs - 1];
++ if ((p->longestMatchLength >= mainLen && newDistance < mainDist) ||
++ (p->longestMatchLength == mainLen + 1 && !ChangePair(mainDist, newDistance)) ||
++ (p->longestMatchLength > mainLen + 1) ||
++ (p->longestMatchLength + 1 >= mainLen && mainLen >= 3 && ChangePair(newDistance, mainDist)))
++ return 1;
++ }
++
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 len, limit;
++ const Byte *data2 = data - (p->reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ limit = mainLen - 1;
++ for (len = 2; len < limit && data[len] == data2[len]; len++);
++ if (len >= limit)
++ return 1;
++ }
++ *backRes = mainDist + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 2);
++ return mainLen;
++}
++
++static void WriteEndMarker(CLzmaEnc *p, UInt32 posState)
++{
++ UInt32 len;
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
++ p->state = kMatchNextStates[p->state];
++ len = LZMA_MATCH_LEN_MIN;
++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, (1 << kNumPosSlotBits) - 1);
++ RangeEnc_EncodeDirectBits(&p->rc, (((UInt32)1 << 30) - 1) >> kNumAlignBits, 30 - kNumAlignBits);
++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask);
++}
++
++static SRes CheckErrors(CLzmaEnc *p)
++{
++ if (p->result != SZ_OK)
++ return p->result;
++ if (p->rc.res != SZ_OK)
++ p->result = SZ_ERROR_WRITE;
++ if (p->matchFinderBase.result != SZ_OK)
++ p->result = SZ_ERROR_READ;
++ if (p->result != SZ_OK)
++ p->finished = True;
++ return p->result;
++}
++
++static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
++{
++ /* ReleaseMFStream(); */
++ p->finished = True;
++ if (p->writeEndMark)
++ WriteEndMarker(p, nowPos & p->pbMask);
++ RangeEnc_FlushData(&p->rc);
++ RangeEnc_FlushStream(&p->rc);
++ return CheckErrors(p);
++}
++
++static void FillAlignPrices(CLzmaEnc *p)
++{
++ UInt32 i;
++ for (i = 0; i < kAlignTableSize; i++)
++ p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices);
++ p->alignPriceCount = 0;
++}
++
++static void FillDistancesPrices(CLzmaEnc *p)
++{
++ UInt32 tempPrices[kNumFullDistances];
++ UInt32 i, lenToPosState;
++ for (i = kStartPosModelIndex; i < kNumFullDistances; i++)
++ {
++ UInt32 posSlot = GetPosSlot1(i);
++ UInt32 footerBits = ((posSlot >> 1) - 1);
++ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
++ tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base - posSlot - 1, footerBits, i - base, p->ProbPrices);
++ }
++
++ for (lenToPosState = 0; lenToPosState < kNumLenToPosStates; lenToPosState++)
++ {
++ UInt32 posSlot;
++ const CLzmaProb *encoder = p->posSlotEncoder[lenToPosState];
++ UInt32 *posSlotPrices = p->posSlotPrices[lenToPosState];
++ for (posSlot = 0; posSlot < p->distTableSize; posSlot++)
++ posSlotPrices[posSlot] = RcTree_GetPrice(encoder, kNumPosSlotBits, posSlot, p->ProbPrices);
++ for (posSlot = kEndPosModelIndex; posSlot < p->distTableSize; posSlot++)
++ posSlotPrices[posSlot] += ((((posSlot >> 1) - 1) - kNumAlignBits) << kNumBitPriceShiftBits);
++
++ {
++ UInt32 *distancesPrices = p->distancesPrices[lenToPosState];
++ UInt32 i;
++ for (i = 0; i < kStartPosModelIndex; i++)
++ distancesPrices[i] = posSlotPrices[i];
++ for (; i < kNumFullDistances; i++)
++ distancesPrices[i] = posSlotPrices[GetPosSlot1(i)] + tempPrices[i];
++ }
++ }
++ p->matchPriceCount = 0;
++}
++
++void LzmaEnc_Construct(CLzmaEnc *p)
++{
++ RangeEnc_Construct(&p->rc);
++ MatchFinder_Construct(&p->matchFinderBase);
++ #ifndef _7ZIP_ST
++ MatchFinderMt_Construct(&p->matchFinderMt);
++ p->matchFinderMt.MatchFinder = &p->matchFinderBase;
++ #endif
++
++ {
++ CLzmaEncProps props;
++ LzmaEncProps_Init(&props);
++ LzmaEnc_SetProps(p, &props);
++ }
++
++ #ifndef LZMA_LOG_BSR
++ LzmaEnc_FastPosInit(p->g_FastPos);
++ #endif
++
++ LzmaEnc_InitPriceTables(p->ProbPrices);
++ p->litProbs = 0;
++ p->saveState.litProbs = 0;
++}
++
++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc)
++{
++ void *p;
++ p = alloc->Alloc(alloc, sizeof(CLzmaEnc));
++ if (p != 0)
++ LzmaEnc_Construct((CLzmaEnc *)p);
++ return p;
++}
++
++void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->litProbs);
++ alloc->Free(alloc, p->saveState.litProbs);
++ p->litProbs = 0;
++ p->saveState.litProbs = 0;
++}
++
++void LzmaEnc_Destruct(CLzmaEnc *p, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ #ifndef _7ZIP_ST
++ MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
++ #endif
++ MatchFinder_Free(&p->matchFinderBase, allocBig);
++ LzmaEnc_FreeLits(p, alloc);
++ RangeEnc_Free(&p->rc, alloc);
++}
++
++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig);
++ alloc->Free(alloc, p);
++}
++
++static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, Bool useLimits, UInt32 maxPackSize, UInt32 maxUnpackSize)
++{
++ UInt32 nowPos32, startPos32;
++ if (p->needInit)
++ {
++ p->matchFinder.Init(p->matchFinderObj);
++ p->needInit = 0;
++ }
++
++ if (p->finished)
++ return p->result;
++ RINOK(CheckErrors(p));
++
++ nowPos32 = (UInt32)p->nowPos64;
++ startPos32 = nowPos32;
++
++ if (p->nowPos64 == 0)
++ {
++ UInt32 numPairs;
++ Byte curByte;
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
++ return Flush(p, nowPos32);
++ ReadMatchDistances(p, &numPairs);
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][0], 0);
++ p->state = kLiteralNextStates[p->state];
++ curByte = p->matchFinder.GetIndexByte(p->matchFinderObj, 0 - p->additionalOffset);
++ LitEnc_Encode(&p->rc, p->litProbs, curByte);
++ p->additionalOffset--;
++ nowPos32++;
++ }
++
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0)
++ for (;;)
++ {
++ UInt32 pos, len, posState;
++
++ if (p->fastMode)
++ len = GetOptimumFast(p, &pos);
++ else
++ len = GetOptimum(p, nowPos32, &pos);
++
++ #ifdef SHOW_STAT2
++ printf("\n pos = %4X, len = %d pos = %d", nowPos32, len, pos);
++ #endif
++
++ posState = nowPos32 & p->pbMask;
++ if (len == 1 && pos == (UInt32)-1)
++ {
++ Byte curByte;
++ CLzmaProb *probs;
++ const Byte *data;
++
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 0);
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
++ curByte = *data;
++ probs = LIT_PROBS(nowPos32, *(data - 1));
++ if (IsCharState(p->state))
++ LitEnc_Encode(&p->rc, probs, curByte);
++ else
++ LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0] - 1));
++ p->state = kLiteralNextStates[p->state];
++ }
++ else
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
++ if (pos < LZMA_NUM_REPS)
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 1);
++ if (pos == 0)
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 0);
++ RangeEnc_EncodeBit(&p->rc, &p->isRep0Long[p->state][posState], ((len == 1) ? 0 : 1));
++ }
++ else
++ {
++ UInt32 distance = p->reps[pos];
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 1);
++ if (pos == 1)
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 0);
++ else
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 1);
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG2[p->state], pos - 2);
++ if (pos == 3)
++ p->reps[3] = p->reps[2];
++ p->reps[2] = p->reps[1];
++ }
++ p->reps[1] = p->reps[0];
++ p->reps[0] = distance;
++ }
++ if (len == 1)
++ p->state = kShortRepNextStates[p->state];
++ else
++ {
++ LenEnc_Encode2(&p->repLenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ p->state = kRepNextStates[p->state];
++ }
++ }
++ else
++ {
++ UInt32 posSlot;
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
++ p->state = kMatchNextStates[p->state];
++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ pos -= LZMA_NUM_REPS;
++ GetPosSlot(pos, posSlot);
++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, posSlot);
++
++ if (posSlot >= kStartPosModelIndex)
++ {
++ UInt32 footerBits = ((posSlot >> 1) - 1);
++ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
++ UInt32 posReduced = pos - base;
++
++ if (posSlot < kEndPosModelIndex)
++ RcTree_ReverseEncode(&p->rc, p->posEncoders + base - posSlot - 1, footerBits, posReduced);
++ else
++ {
++ RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits);
++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask);
++ p->alignPriceCount++;
++ }
++ }
++ p->reps[3] = p->reps[2];
++ p->reps[2] = p->reps[1];
++ p->reps[1] = p->reps[0];
++ p->reps[0] = pos;
++ p->matchPriceCount++;
++ }
++ }
++ p->additionalOffset -= len;
++ nowPos32 += len;
++ if (p->additionalOffset == 0)
++ {
++ UInt32 processed;
++ if (!p->fastMode)
++ {
++ if (p->matchPriceCount >= (1 << 7))
++ FillDistancesPrices(p);
++ if (p->alignPriceCount >= kAlignTableSize)
++ FillAlignPrices(p);
++ }
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
++ break;
++ processed = nowPos32 - startPos32;
++ if (useLimits)
++ {
++ if (processed + kNumOpts + 300 >= maxUnpackSize ||
++ RangeEnc_GetProcessed(&p->rc) + kNumOpts * 2 >= maxPackSize)
++ break;
++ }
++ else if (processed >= (1 << 15))
++ {
++ p->nowPos64 += nowPos32 - startPos32;
++ return CheckErrors(p);
++ }
++ }
++ }
++ p->nowPos64 += nowPos32 - startPos32;
++ return Flush(p, nowPos32);
++}
++
++#define kBigHashDicLimit ((UInt32)1 << 24)
++
++static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ UInt32 beforeSize = kNumOpts;
++ Bool btMode;
++ if (!RangeEnc_Alloc(&p->rc, alloc))
++ return SZ_ERROR_MEM;
++ btMode = (p->matchFinderBase.btMode != 0);
++ #ifndef _7ZIP_ST
++ p->mtMode = (p->multiThread && !p->fastMode && btMode);
++ #endif
++
++ {
++ unsigned lclp = p->lc + p->lp;
++ if (p->litProbs == 0 || p->saveState.litProbs == 0 || p->lclp != lclp)
++ {
++ LzmaEnc_FreeLits(p, alloc);
++ p->litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
++ p->saveState.litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
++ if (p->litProbs == 0 || p->saveState.litProbs == 0)
++ {
++ LzmaEnc_FreeLits(p, alloc);
++ return SZ_ERROR_MEM;
++ }
++ p->lclp = lclp;
++ }
++ }
++
++ p->matchFinderBase.bigHash = (p->dictSize > kBigHashDicLimit);
++
++ if (beforeSize + p->dictSize < keepWindowSize)
++ beforeSize = keepWindowSize - p->dictSize;
++
++ #ifndef _7ZIP_ST
++ if (p->mtMode)
++ {
++ RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig));
++ p->matchFinderObj = &p->matchFinderMt;
++ MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
++ }
++ else
++ #endif
++ {
++ if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig))
++ return SZ_ERROR_MEM;
++ p->matchFinderObj = &p->matchFinderBase;
++ MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder);
++ }
++ return SZ_OK;
++}
++
++void LzmaEnc_Init(CLzmaEnc *p)
++{
++ UInt32 i;
++ p->state = 0;
++ for (i = 0 ; i < LZMA_NUM_REPS; i++)
++ p->reps[i] = 0;
++
++ RangeEnc_Init(&p->rc);
++
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ UInt32 j;
++ for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++)
++ {
++ p->isMatch[i][j] = kProbInitValue;
++ p->isRep0Long[i][j] = kProbInitValue;
++ }
++ p->isRep[i] = kProbInitValue;
++ p->isRepG0[i] = kProbInitValue;
++ p->isRepG1[i] = kProbInitValue;
++ p->isRepG2[i] = kProbInitValue;
++ }
++
++ {
++ UInt32 num = 0x300 << (p->lp + p->lc);
++ for (i = 0; i < num; i++)
++ p->litProbs[i] = kProbInitValue;
++ }
++
++ {
++ for (i = 0; i < kNumLenToPosStates; i++)
++ {
++ CLzmaProb *probs = p->posSlotEncoder[i];
++ UInt32 j;
++ for (j = 0; j < (1 << kNumPosSlotBits); j++)
++ probs[j] = kProbInitValue;
++ }
++ }
++ {
++ for (i = 0; i < kNumFullDistances - kEndPosModelIndex; i++)
++ p->posEncoders[i] = kProbInitValue;
++ }
++
++ LenEnc_Init(&p->lenEnc.p);
++ LenEnc_Init(&p->repLenEnc.p);
++
++ for (i = 0; i < (1 << kNumAlignBits); i++)
++ p->posAlignEncoder[i] = kProbInitValue;
++
++ p->optimumEndIndex = 0;
++ p->optimumCurrentIndex = 0;
++ p->additionalOffset = 0;
++
++ p->pbMask = (1 << p->pb) - 1;
++ p->lpMask = (1 << p->lp) - 1;
++}
++
++void LzmaEnc_InitPrices(CLzmaEnc *p)
++{
++ if (!p->fastMode)
++ {
++ FillDistancesPrices(p);
++ FillAlignPrices(p);
++ }
++
++ p->lenEnc.tableSize =
++ p->repLenEnc.tableSize =
++ p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN;
++ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, p->ProbPrices);
++ LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, p->ProbPrices);
++}
++
++static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ UInt32 i;
++ for (i = 0; i < (UInt32)kDicLogSizeMaxCompress; i++)
++ if (p->dictSize <= ((UInt32)1 << i))
++ break;
++ p->distTableSize = i * 2;
++
++ p->finished = False;
++ p->result = SZ_OK;
++ RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig));
++ LzmaEnc_Init(p);
++ LzmaEnc_InitPrices(p);
++ p->nowPos64 = 0;
++ return SZ_OK;
++}
++
++static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ p->matchFinderBase.stream = inStream;
++ p->needInit = 1;
++ p->rc.outStream = outStream;
++ return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
++}
++
++SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp,
++ ISeqInStream *inStream, UInt32 keepWindowSize,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ p->matchFinderBase.stream = inStream;
++ p->needInit = 1;
++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
++}
++
++static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
++{
++ p->matchFinderBase.directInput = 1;
++ p->matchFinderBase.bufferBase = (Byte *)src;
++ p->matchFinderBase.directInputRem = srcLen;
++}
++
++SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
++ UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ LzmaEnc_SetInputBuf(p, src, srcLen);
++ p->needInit = 1;
++
++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
++}
++
++void LzmaEnc_Finish(CLzmaEncHandle pp)
++{
++ #ifndef _7ZIP_ST
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ if (p->mtMode)
++ MatchFinderMt_ReleaseStream(&p->matchFinderMt);
++ #else
++ pp = pp;
++ #endif
++}
++
++typedef struct
++{
++ ISeqOutStream funcTable;
++ Byte *data;
++ SizeT rem;
++ Bool overflow;
++} CSeqOutStreamBuf;
++
++static size_t MyWrite(void *pp, const void *data, size_t size)
++{
++ CSeqOutStreamBuf *p = (CSeqOutStreamBuf *)pp;
++ if (p->rem < size)
++ {
++ size = p->rem;
++ p->overflow = True;
++ }
++ memcpy(p->data, data, size);
++ p->rem -= size;
++ p->data += size;
++ return size;
++}
++
++
++UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
++{
++ const CLzmaEnc *p = (CLzmaEnc *)pp;
++ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
++}
++
++const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp)
++{
++ const CLzmaEnc *p = (CLzmaEnc *)pp;
++ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
++}
++
++SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
++ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ UInt64 nowPos64;
++ SRes res;
++ CSeqOutStreamBuf outStream;
++
++ outStream.funcTable.Write = MyWrite;
++ outStream.data = dest;
++ outStream.rem = *destLen;
++ outStream.overflow = False;
++
++ p->writeEndMark = False;
++ p->finished = False;
++ p->result = SZ_OK;
++
++ if (reInit)
++ LzmaEnc_Init(p);
++ LzmaEnc_InitPrices(p);
++ nowPos64 = p->nowPos64;
++ RangeEnc_Init(&p->rc);
++ p->rc.outStream = &outStream.funcTable;
++
++ res = LzmaEnc_CodeOneBlock(p, True, desiredPackSize, *unpackSize);
++
++ *unpackSize = (UInt32)(p->nowPos64 - nowPos64);
++ *destLen -= outStream.rem;
++ if (outStream.overflow)
++ return SZ_ERROR_OUTPUT_EOF;
++
++ return res;
++}
++
++static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
++{
++ SRes res = SZ_OK;
++
++ #ifndef _7ZIP_ST
++ Byte allocaDummy[0x300];
++ int i = 0;
++ for (i = 0; i < 16; i++)
++ allocaDummy[i] = (Byte)i;
++ #endif
++
++ for (;;)
++ {
++ res = LzmaEnc_CodeOneBlock(p, False, 0, 0);
++ if (res != SZ_OK || p->finished != 0)
++ break;
++ if (progress != 0)
++ {
++ res = progress->Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc));
++ if (res != SZ_OK)
++ {
++ res = SZ_ERROR_PROGRESS;
++ break;
++ }
++ }
++ }
++ LzmaEnc_Finish(p);
++ return res;
++}
++
++SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig));
++ return LzmaEnc_Encode2((CLzmaEnc *)pp, progress);
++}
++
++SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ int i;
++ UInt32 dictSize = p->dictSize;
++ if (*size < LZMA_PROPS_SIZE)
++ return SZ_ERROR_PARAM;
++ *size = LZMA_PROPS_SIZE;
++ props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
++
++ for (i = 11; i <= 30; i++)
++ {
++ if (dictSize <= ((UInt32)2 << i))
++ {
++ dictSize = (2 << i);
++ break;
++ }
++ if (dictSize <= ((UInt32)3 << i))
++ {
++ dictSize = (3 << i);
++ break;
++ }
++ }
++
++ for (i = 0; i < 4; i++)
++ props[1 + i] = (Byte)(dictSize >> (8 * i));
++ return SZ_OK;
++}
++
++SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ SRes res;
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++
++ CSeqOutStreamBuf outStream;
++
++ LzmaEnc_SetInputBuf(p, src, srcLen);
++
++ outStream.funcTable.Write = MyWrite;
++ outStream.data = dest;
++ outStream.rem = *destLen;
++ outStream.overflow = False;
++
++ p->writeEndMark = writeEndMark;
++
++ p->rc.outStream = &outStream.funcTable;
++ res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig);
++ if (res == SZ_OK)
++ res = LzmaEnc_Encode2(p, progress);
++
++ *destLen -= outStream.rem;
++ if (outStream.overflow)
++ return SZ_ERROR_OUTPUT_EOF;
++ return res;
++}
++
++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc);
++ SRes res;
++ if (p == 0)
++ return SZ_ERROR_MEM;
++
++ res = LzmaEnc_SetProps(p, props);
++ if (res == SZ_OK)
++ {
++ res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize);
++ if (res == SZ_OK)
++ res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen,
++ writeEndMark, progress, alloc, allocBig);
++ }
++
++ LzmaEnc_Destroy(p, alloc, allocBig);
++ return res;
++}
+--- /dev/null
++++ b/lib/lzma/Makefile
+@@ -0,0 +1,7 @@
++lzma_compress-objs := LzFind.o LzmaEnc.o
++lzma_decompress-objs := LzmaDec.o
++
++obj-$(CONFIG_LZMA_COMPRESS) += lzma_compress.o
++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma_decompress.o
++
++EXTRA_CFLAGS += -Iinclude/linux -Iinclude/linux/lzma -include types.h
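The LzmaEncode() one-call wrapper and the Makefile above are the pieces a user of CONFIG_LZMA_COMPRESS would touch directly. A minimal caller could look like the sketch below; this is an illustration only, not part of the patch, and the vmalloc-backed allocator, the include lines and the chosen dictionary size are assumptions (the ISzAlloc call shape follows the alloc->Alloc(alloc, size) / alloc->Free(alloc, ptr) uses visible in LzmaEnc.c above).

/* Illustrative sketch only -- not part of this patch. Assumes the LzmaEnc.h /
 * types.h headers added elsewhere in this patch set. */
#include <linux/vmalloc.h>
#include "LzmaEnc.h"

/* ISzAlloc callbacks: the first argument is the allocator object itself (unused here). */
static void *SzAlloc(void *p, size_t size) { return vmalloc(size); }
static void SzFree(void *p, void *address) { vfree(address); }
static ISzAlloc lzma_alloc = { SzAlloc, SzFree };

/* Compress src[0..srcLen) into dst; the 5-byte properties header is returned
 * separately in props[], since the raw LZMA stream does not carry it. */
static int lzma_pack_once(const Byte *src, SizeT srcLen,
			  Byte *dst, SizeT *dstLen,
			  Byte props[LZMA_PROPS_SIZE])
{
	CLzmaEncProps p;
	SizeT propsSize = LZMA_PROPS_SIZE;

	LzmaEncProps_Init(&p);		/* level-5 defaults; Normalize() fills in the rest */
	p.dictSize = 1 << 16;		/* 64 KiB dictionary -- an assumption for the example */

	/* writeEndMark = 0: the caller is expected to record the uncompressed size */
	return LzmaEncode(dst, dstLen, src, srcLen, &p, props, &propsSize,
			  0, NULL /* no progress callback */, &lzma_alloc, &lzma_alloc);
}

LzmaEncode() itself just chains LzmaEnc_Create(), LzmaEnc_SetProps(), LzmaEnc_WriteProperties() and LzmaEnc_MemEncode(), so callers that need streaming output or per-block coding use those entry points directly instead.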
diff --git a/target/linux/generic/patches-3.3/511-debloat_lzma.patch b/target/linux/generic/patches-3.3/511-debloat_lzma.patch
new file mode 100644
index 0000000..1e41661
--- /dev/null
+++ b/target/linux/generic/patches-3.3/511-debloat_lzma.patch
@@ -0,0 +1,485 @@
+--- a/include/linux/lzma/LzmaDec.h
++++ b/include/linux/lzma/LzmaDec.h
+@@ -31,14 +31,6 @@ typedef struct _CLzmaProps
+ UInt32 dicSize;
+ } CLzmaProps;
+
+-/* LzmaProps_Decode - decodes properties
+-Returns:
+- SZ_OK
+- SZ_ERROR_UNSUPPORTED - Unsupported properties
+-*/
+-
+-SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size);
+-
+
+ /* ---------- LZMA Decoder state ---------- */
+
+@@ -70,8 +62,6 @@ typedef struct
+
+ #define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; }
+
+-void LzmaDec_Init(CLzmaDec *p);
+-
+ /* There are two types of LZMA streams:
+ 0) Stream with end mark. That end mark adds about 6 bytes to compressed size.
+ 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */
+@@ -108,97 +98,6 @@ typedef enum
+
+ /* ELzmaStatus is used only as output value for function call */
+
+-
+-/* ---------- Interfaces ---------- */
+-
+-/* There are 3 levels of interfaces:
+- 1) Dictionary Interface
+- 2) Buffer Interface
+- 3) One Call Interface
+- You can select any of these interfaces, but don't mix functions from different
+- groups for same object. */
+-
+-
+-/* There are two variants to allocate state for Dictionary Interface:
+- 1) LzmaDec_Allocate / LzmaDec_Free
+- 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs
+- You can use variant 2, if you set dictionary buffer manually.
+- For Buffer Interface you must always use variant 1.
+-
+-LzmaDec_Allocate* can return:
+- SZ_OK
+- SZ_ERROR_MEM - Memory allocation error
+- SZ_ERROR_UNSUPPORTED - Unsupported properties
+-*/
+-
+-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc);
+-void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc);
+-
+-SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc);
+-void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc);
+-
+-/* ---------- Dictionary Interface ---------- */
+-
+-/* You can use it, if you want to eliminate the overhead for data copying from
+- dictionary to some other external buffer.
+- You must work with CLzmaDec variables directly in this interface.
+-
+- STEPS:
+- LzmaDec_Constr()
+- LzmaDec_Allocate()
+- for (each new stream)
+- {
+- LzmaDec_Init()
+- while (it needs more decompression)
+- {
+- LzmaDec_DecodeToDic()
+- use data from CLzmaDec::dic and update CLzmaDec::dicPos
+- }
+- }
+- LzmaDec_Free()
+-*/
+-
+-/* LzmaDec_DecodeToDic
+-
+- The decoding to internal dictionary buffer (CLzmaDec::dic).
+- You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!!
+-
+-finishMode:
+- It has meaning only if the decoding reaches output limit (dicLimit).
+- LZMA_FINISH_ANY - Decode just dicLimit bytes.
+- LZMA_FINISH_END - Stream must be finished after dicLimit.
+-
+-Returns:
+- SZ_OK
+- status:
+- LZMA_STATUS_FINISHED_WITH_MARK
+- LZMA_STATUS_NOT_FINISHED
+- LZMA_STATUS_NEEDS_MORE_INPUT
+- LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
+- SZ_ERROR_DATA - Data error
+-*/
+-
+-SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit,
+- const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
+-
+-
+-/* ---------- Buffer Interface ---------- */
+-
+-/* It's zlib-like interface.
+- See LzmaDec_DecodeToDic description for information about STEPS and return results,
+- but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need
+- to work with CLzmaDec variables manually.
+-
+-finishMode:
+- It has meaning only if the decoding reaches output limit (*destLen).
+- LZMA_FINISH_ANY - Decode just destLen bytes.
+- LZMA_FINISH_END - Stream must be finished after (*destLen).
+-*/
+-
+-SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen,
+- const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
+-
+-
+ /* ---------- One Call Interface ---------- */
+
+ /* LzmaDecode
+--- a/lib/lzma/LzmaDec.c
++++ b/lib/lzma/LzmaDec.c
+@@ -682,7 +682,7 @@ static void LzmaDec_InitRc(CLzmaDec *p,
+ p->needFlush = 0;
+ }
+
+-void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
++static void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
+ {
+ p->needFlush = 1;
+ p->remainLen = 0;
+@@ -698,7 +698,7 @@ void LzmaDec_InitDicAndState(CLzmaDec *p
+ p->needInitState = 1;
+ }
+
+-void LzmaDec_Init(CLzmaDec *p)
++static void LzmaDec_Init(CLzmaDec *p)
+ {
+ p->dicPos = 0;
+ LzmaDec_InitDicAndState(p, True, True);
+@@ -716,7 +716,7 @@ static void LzmaDec_InitStateReal(CLzmaD
+ p->needInitState = 0;
+ }
+
+-SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
++static SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
+ ELzmaFinishMode finishMode, ELzmaStatus *status)
+ {
+ SizeT inSize = *srcLen;
+@@ -837,7 +837,7 @@ SRes LzmaDec_DecodeToDic(CLzmaDec *p, Si
+ return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA;
+ }
+
+-SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
++static __maybe_unused SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
+ {
+ SizeT outSize = *destLen;
+ SizeT inSize = *srcLen;
+@@ -877,7 +877,7 @@ SRes LzmaDec_DecodeToBuf(CLzmaDec *p, By
+ }
+ }
+
+-void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
++static void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
+ {
+ alloc->Free(alloc, p->probs);
+ p->probs = 0;
+@@ -889,13 +889,13 @@ static void LzmaDec_FreeDict(CLzmaDec *p
+ p->dic = 0;
+ }
+
+-void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)
++static void __maybe_unused LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)
+ {
+ LzmaDec_FreeProbs(p, alloc);
+ LzmaDec_FreeDict(p, alloc);
+ }
+
+-SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
++static SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
+ {
+ UInt32 dicSize;
+ Byte d;
+@@ -935,7 +935,7 @@ static SRes LzmaDec_AllocateProbs2(CLzma
+ return SZ_OK;
+ }
+
+-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++static SRes __maybe_unused LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
+ {
+ CLzmaProps propNew;
+ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
+@@ -944,7 +944,7 @@ SRes LzmaDec_AllocateProbs(CLzmaDec *p,
+ return SZ_OK;
+ }
+
+-SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++static SRes __maybe_unused LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
+ {
+ CLzmaProps propNew;
+ SizeT dicBufSize;
+--- a/include/linux/lzma/LzmaEnc.h
++++ b/include/linux/lzma/LzmaEnc.h
+@@ -31,9 +31,6 @@ typedef struct _CLzmaEncProps
+ } CLzmaEncProps;
+
+ void LzmaEncProps_Init(CLzmaEncProps *p);
+-void LzmaEncProps_Normalize(CLzmaEncProps *p);
+-UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2);
+-
+
+ /* ---------- CLzmaEncHandle Interface ---------- */
+
+@@ -53,26 +50,9 @@ CLzmaEncHandle LzmaEnc_Create(ISzAlloc *
+ void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig);
+ SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
+ SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
+-SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream,
+- ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+ SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+
+-/* ---------- One Call Interface ---------- */
+-
+-/* LzmaEncode
+-Return code:
+- SZ_OK - OK
+- SZ_ERROR_MEM - Memory allocation error
+- SZ_ERROR_PARAM - Incorrect paramater
+- SZ_ERROR_OUTPUT_EOF - output buffer overflow
+- SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
+-*/
+-
+-SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+- const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
+- ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+-
+ #ifdef __cplusplus
+ }
+ #endif
+--- a/lib/lzma/LzmaEnc.c
++++ b/lib/lzma/LzmaEnc.c
+@@ -53,7 +53,7 @@ void LzmaEncProps_Init(CLzmaEncProps *p)
+ p->writeEndMark = 0;
+ }
+
+-void LzmaEncProps_Normalize(CLzmaEncProps *p)
++static void LzmaEncProps_Normalize(CLzmaEncProps *p)
+ {
+ int level = p->level;
+ if (level < 0) level = 5;
+@@ -76,7 +76,7 @@ void LzmaEncProps_Normalize(CLzmaEncProp
+ #endif
+ }
+
+-UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
++static UInt32 __maybe_unused LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
+ {
+ CLzmaEncProps props = *props2;
+ LzmaEncProps_Normalize(&props);
+@@ -93,7 +93,7 @@ UInt32 LzmaEncProps_GetDictSize(const CL
+
+ #define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); }
+
+-UInt32 GetPosSlot1(UInt32 pos)
++static UInt32 GetPosSlot1(UInt32 pos)
+ {
+ UInt32 res;
+ BSR2_RET(pos, res);
+@@ -107,7 +107,7 @@ UInt32 GetPosSlot1(UInt32 pos)
+ #define kNumLogBits (9 + (int)sizeof(size_t) / 2)
+ #define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
+
+-void LzmaEnc_FastPosInit(Byte *g_FastPos)
++static void LzmaEnc_FastPosInit(Byte *g_FastPos)
+ {
+ int c = 2, slotFast;
+ g_FastPos[0] = 0;
+@@ -339,7 +339,7 @@ typedef struct
+ CSaveState saveState;
+ } CLzmaEnc;
+
+-void LzmaEnc_SaveState(CLzmaEncHandle pp)
++static void __maybe_unused LzmaEnc_SaveState(CLzmaEncHandle pp)
+ {
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ CSaveState *dest = &p->saveState;
+@@ -365,7 +365,7 @@ void LzmaEnc_SaveState(CLzmaEncHandle pp
+ memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb));
+ }
+
+-void LzmaEnc_RestoreState(CLzmaEncHandle pp)
++static void __maybe_unused LzmaEnc_RestoreState(CLzmaEncHandle pp)
+ {
+ CLzmaEnc *dest = (CLzmaEnc *)pp;
+ const CSaveState *p = &dest->saveState;
+@@ -600,7 +600,7 @@ static void LitEnc_EncodeMatched(CRangeE
+ while (symbol < 0x10000);
+ }
+
+-void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
++static void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
+ {
+ UInt32 i;
+ for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits))
+@@ -1676,7 +1676,7 @@ static void FillDistancesPrices(CLzmaEnc
+ p->matchPriceCount = 0;
+ }
+
+-void LzmaEnc_Construct(CLzmaEnc *p)
++static void LzmaEnc_Construct(CLzmaEnc *p)
+ {
+ RangeEnc_Construct(&p->rc);
+ MatchFinder_Construct(&p->matchFinderBase);
+@@ -1709,7 +1709,7 @@ CLzmaEncHandle LzmaEnc_Create(ISzAlloc *
+ return p;
+ }
+
+-void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
++static void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
+ {
+ alloc->Free(alloc, p->litProbs);
+ alloc->Free(alloc, p->saveState.litProbs);
+@@ -2074,7 +2074,7 @@ SRes LzmaEnc_MemPrepare(CLzmaEncHandle p
+ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
+ }
+
+-void LzmaEnc_Finish(CLzmaEncHandle pp)
++static void LzmaEnc_Finish(CLzmaEncHandle pp)
+ {
+ #ifndef _7ZIP_ST
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+@@ -2108,7 +2108,7 @@ static size_t MyWrite(void *pp, const vo
+ }
+
+
+-UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
++static UInt32 __maybe_unused LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
+ {
+ const CLzmaEnc *p = (CLzmaEnc *)pp;
+ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
+@@ -2120,7 +2120,7 @@ const Byte *LzmaEnc_GetCurBuf(CLzmaEncHa
+ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
+ }
+
+-SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
++static SRes __maybe_unused LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
+ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
+ {
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+@@ -2248,7 +2248,7 @@ SRes LzmaEnc_MemEncode(CLzmaEncHandle pp
+ return res;
+ }
+
+-SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++static __maybe_unused SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
+ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
+ {
+--- a/include/linux/lzma/LzFind.h
++++ b/include/linux/lzma/LzFind.h
+@@ -55,11 +55,6 @@ typedef struct _CMatchFinder
+
+ #define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
+
+-int MatchFinder_NeedMove(CMatchFinder *p);
+-Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p);
+-void MatchFinder_MoveBlock(CMatchFinder *p);
+-void MatchFinder_ReadIfRequired(CMatchFinder *p);
+-
+ void MatchFinder_Construct(CMatchFinder *p);
+
+ /* Conditions:
+@@ -70,12 +65,6 @@ int MatchFinder_Create(CMatchFinder *p,
+ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
+ ISzAlloc *alloc);
+ void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc);
+-void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems);
+-void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
+-
+-UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
+- UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
+- UInt32 *distances, UInt32 maxLen);
+
+ /*
+ Conditions:
+@@ -102,12 +91,6 @@ typedef struct _IMatchFinder
+
+ void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
+
+-void MatchFinder_Init(CMatchFinder *p);
+-UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
+-UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
+-void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
+-void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
+-
+ #ifdef __cplusplus
+ }
+ #endif
+--- a/lib/lzma/LzFind.c
++++ b/lib/lzma/LzFind.c
+@@ -42,12 +42,12 @@ static int LzInWindow_Create(CMatchFinde
+ return (p->bufferBase != 0);
+ }
+
+-Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
+-Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
++static Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
++static Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
+
+-UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
++static UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
+
+-void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
++static void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
+ {
+ p->posLimit -= subValue;
+ p->pos -= subValue;
+@@ -268,7 +268,7 @@ static void MatchFinder_SetLimits(CMatch
+ p->posLimit = p->pos + limit;
+ }
+
+-void MatchFinder_Init(CMatchFinder *p)
++static void MatchFinder_Init(CMatchFinder *p)
+ {
+ UInt32 i;
+ for (i = 0; i < p->hashSizeSum; i++)
+@@ -287,7 +287,7 @@ static UInt32 MatchFinder_GetSubValue(CM
+ return (p->pos - p->historySize - 1) & kNormalizeMask;
+ }
+
+-void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
++static void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
+ {
+ UInt32 i;
+ for (i = 0; i < numItems; i++)
+@@ -350,7 +350,7 @@ static UInt32 * Hc_GetMatchesSpec(UInt32
+ }
+ }
+
+-UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++static UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
+ UInt32 *distances, UInt32 maxLen)
+ {
+@@ -492,7 +492,7 @@ static UInt32 Bt2_MatchFinder_GetMatches
+ GET_MATCHES_FOOTER(offset, 1)
+ }
+
+-UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++static __maybe_unused UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+ {
+ UInt32 offset;
+ GET_MATCHES_HEADER(3)
+@@ -632,7 +632,7 @@ static UInt32 Hc4_MatchFinder_GetMatches
+ MOVE_POS_RET
+ }
+
+-UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++static __maybe_unused UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+ {
+ UInt32 offset;
+ GET_MATCHES_HEADER(3)
+@@ -657,7 +657,7 @@ static void Bt2_MatchFinder_Skip(CMatchF
+ while (--num != 0);
+ }
+
+-void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++static __maybe_unused void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+ {
+ do
+ {
+@@ -718,7 +718,7 @@ static void Hc4_MatchFinder_Skip(CMatchF
+ while (--num != 0);
+ }
+
+-void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++static __maybe_unused void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+ {
+ do
+ {
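
The debloat patch above applies one recurring pattern: symbols that lose their last external user become static, and those with no remaining in-file caller additionally get __maybe_unused so gcc's -Wunused-function stays quiet while the dead code can still be discarded. A minimal userspace sketch of that pattern follows; in the kernel __maybe_unused comes in via <linux/compiler.h>, here it is defined locally, and the helper names are hypothetical.

#include <stdio.h>

#define __maybe_unused __attribute__((unused))

/* Still called below: plain static is enough. */
static int used_helper(int x)
{
	return x * 2;
}

/* No caller left after the debloat: without __maybe_unused this would
 * trigger -Wunused-function under -Wall. */
static int __maybe_unused unused_helper(int x)
{
	return x + 1;
}

int main(void)
{
	printf("%d\n", used_helper(21));
	return 0;
}
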
diff --git a/target/linux/generic/patches-3.3/512-jffs2_eofdetect.patch b/target/linux/generic/patches-3.3/512-jffs2_eofdetect.patch
new file mode 100644
index 0000000..1fac6d1
--- /dev/null
+++ b/target/linux/generic/patches-3.3/512-jffs2_eofdetect.patch
@@ -0,0 +1,132 @@
+--- a/fs/jffs2/build.c
++++ b/fs/jffs2/build.c
+@@ -112,6 +112,17 @@ static int jffs2_build_filesystem(struct
+ dbg_fsbuild("scanned flash completely\n");
+ jffs2_dbg_dump_block_lists_nolock(c);
+
++ if (c->flags & (1 << 7)) {
++ printk("%s(): unlocking the mtd device... ", __func__);
++ if (c->mtd->unlock)
++ c->mtd->unlock(c->mtd, 0, c->mtd->size);
++ printk("done.\n");
++
++ printk("%s(): erasing all blocks after the end marker... ", __func__);
++ jffs2_erase_pending_blocks(c, -1);
++ printk("done.\n");
++ }
++
+ dbg_fsbuild("pass 1 starting\n");
+ c->flags |= JFFS2_SB_FLAG_BUILDING;
+ /* Now scan the directory tree, increasing nlink according to every dirent found. */
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -72,7 +72,7 @@ static int file_dirty(struct jffs2_sb_in
+ return ret;
+ if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
+ return ret;
+- /* Turned wasted size into dirty, since we apparently
++ /* Turned wasted size into dirty, since we apparently
+ think it's recoverable now. */
+ jeb->dirty_size += jeb->wasted_size;
+ c->dirty_size += jeb->wasted_size;
+@@ -147,8 +147,11 @@ int jffs2_scan_medium(struct jffs2_sb_in
+ /* reset summary info for next eraseblock scan */
+ jffs2_sum_reset_collected(s);
+
+- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+- buf_size, s);
++ if (c->flags & (1 << 7))
++ ret = BLK_STATE_ALLFF;
++ else
++ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
++ buf_size, s);
+
+ if (ret < 0)
+ goto out;
+@@ -403,7 +406,7 @@ static int jffs2_scan_xref_node(struct j
+ if (!ref)
+ return -ENOMEM;
+
+- /* BEFORE jffs2_build_xattr_subsystem() called,
++ /* BEFORE jffs2_build_xattr_subsystem() called,
+ * and AFTER xattr_ref is marked as a dead xref,
+ * ref->xid is used to store 32bit xid, xd is not used
+ * ref->ino is used to store 32bit inode-number, ic is not used
+@@ -476,7 +479,7 @@ static int jffs2_scan_eraseblock (struct
+ struct jffs2_sum_marker *sm;
+ void *sumptr = NULL;
+ uint32_t sumlen;
+-
++
+ if (!buf_size) {
+ /* XIP case. Just look, point at the summary if it's there */
+ sm = (void *)buf + c->sector_size - sizeof(*sm);
+@@ -492,9 +495,9 @@ static int jffs2_scan_eraseblock (struct
+ buf_len = sizeof(*sm);
+
+ /* Read as much as we want into the _end_ of the preallocated buffer */
+- err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
++ err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
+ jeb->offset + c->sector_size - buf_len,
+- buf_len);
++ buf_len);
+ if (err)
+ return err;
+
+@@ -513,9 +516,9 @@ static int jffs2_scan_eraseblock (struct
+ }
+ if (buf_len < sumlen) {
+ /* Need to read more so that the entire summary node is present */
+- err = jffs2_fill_scan_buf(c, sumptr,
++ err = jffs2_fill_scan_buf(c, sumptr,
+ jeb->offset + c->sector_size - sumlen,
+- sumlen - buf_len);
++ sumlen - buf_len);
+ if (err)
+ return err;
+ }
+@@ -528,7 +531,7 @@ static int jffs2_scan_eraseblock (struct
+
+ if (buf_size && sumlen > buf_size)
+ kfree(sumptr);
+- /* If it returns with a real error, bail.
++ /* If it returns with a real error, bail.
+ If it returns positive, that's a block classification
+ (i.e. BLK_STATE_xxx) so return that too.
+ If it returns zero, fall through to full scan. */
+@@ -549,6 +552,17 @@ static int jffs2_scan_eraseblock (struct
+ return err;
+ }
+
++ if ((buf[0] == 0xde) &&
++ (buf[1] == 0xad) &&
++ (buf[2] == 0xc0) &&
++ (buf[3] == 0xde)) {
++ /* end of filesystem. erase everything after this point */
++ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
++ c->flags |= (1 << 7);
++
++ return BLK_STATE_ALLFF;
++ }
++
+ /* We temporarily use 'ofs' as a pointer into the buffer/jeb */
+ ofs = 0;
+ max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
+@@ -674,7 +688,7 @@ scan_more:
+ scan_end = buf_len;
+ goto more_empty;
+ }
+-
++
+ /* See how much more there is to read in this eraseblock... */
+ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+ if (!buf_len) {
+@@ -910,7 +924,7 @@ scan_more:
+
+ D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
+ jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));
+-
++
+ /* mark_node_obsolete can add to wasted !! */
+ if (jeb->wasted_size) {
+ jeb->dirty_size += jeb->wasted_size;
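
The scan.c hunk above treats an eraseblock that begins with the bytes de ad c0 de as the end-of-filesystem marker OpenWrt appends after the rootfs, sets bit 7 in c->flags, and lets the build.c hunk unlock the mtd device and erase everything behind the marker. Below is a minimal sketch of just the marker check, operating on a plain buffer instead of the JFFS2 scan state; is_jffs2_eof_marker() is an illustrative name, not from the patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int is_jffs2_eof_marker(const uint8_t *buf, size_t len)
{
	return len >= 4 &&
	       buf[0] == 0xde && buf[1] == 0xad &&
	       buf[2] == 0xc0 && buf[3] == 0xde;
}

int main(void)
{
	const uint8_t block[] = { 0xde, 0xad, 0xc0, 0xde, 0xff, 0xff };

	printf("marker found: %d\n", is_jffs2_eof_marker(block, sizeof(block)));
	return 0;
}
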
diff --git a/target/linux/generic/patches-3.3/520-squashfs_update_xz_comp_opts.patch b/target/linux/generic/patches-3.3/520-squashfs_update_xz_comp_opts.patch
new file mode 100644
index 0000000..523b89f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/520-squashfs_update_xz_comp_opts.patch
@@ -0,0 +1,25 @@
+From f31b7c0efa255dd17a5f584022a319387f09b0d8 Mon Sep 17 00:00:00 2001
+From: Jonas Gorski <jonas.gorski@gmail.com>
+Date: Tue, 12 Apr 2011 19:55:41 +0200
+Subject: [PATCH] squashfs: update xz compressor options struct.
+
+Update the xz compressor options struct to match the squashfs userspace
+one.
+---
+ fs/squashfs/xz_wrapper.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+--- a/fs/squashfs/xz_wrapper.c
++++ b/fs/squashfs/xz_wrapper.c
+@@ -39,8 +39,10 @@ struct squashfs_xz {
+ };
+
+ struct comp_opts {
+- __le32 dictionary_size;
+ __le32 flags;
++ __le16 bit_opts;
++ __le16 fb;
++ __le32 dictionary_size;
+ };
+
+ static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,
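
For reference, the updated compressor-options block above is 12 bytes: flags first, then the two 16-bit fields (bit_opts, fb) used by squashfs-tools, then dictionary_size. The quick userland sketch below prints the offsets, with uint32_t/uint16_t standing in for the kernel's __le32/__le16 and assuming the natural, padding-free layout of the struct.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct comp_opts {
	uint32_t flags;
	uint16_t bit_opts;
	uint16_t fb;
	uint32_t dictionary_size;
};

int main(void)
{
	printf("flags@%zu bit_opts@%zu fb@%zu dictionary_size@%zu total=%zu\n",
	       offsetof(struct comp_opts, flags),
	       offsetof(struct comp_opts, bit_opts),
	       offsetof(struct comp_opts, fb),
	       offsetof(struct comp_opts, dictionary_size),
	       sizeof(struct comp_opts));
	return 0;
}
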
diff --git a/target/linux/generic/patches-3.3/600-netfilter_layer7_2.22.patch b/target/linux/generic/patches-3.3/600-netfilter_layer7_2.22.patch
new file mode 100644
index 0000000..92de1cc
--- /dev/null
+++ b/target/linux/generic/patches-3.3/600-netfilter_layer7_2.22.patch
@@ -0,0 +1,2132 @@
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -1053,6 +1053,27 @@ config NETFILTER_XT_MATCH_STATE
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config NETFILTER_XT_MATCH_LAYER7
++ tristate '"layer7" match support'
++ depends on NETFILTER_XTABLES
++ depends on EXPERIMENTAL && (IP_NF_CONNTRACK || NF_CONNTRACK)
++ depends on NETFILTER_ADVANCED
++ help
++ Say Y if you want to be able to classify connections (and their
++ packets) based on regular expression matching of their application
++ layer data. This is one way to classify applications such as
++ peer-to-peer filesharing systems that do not always use the same
++ port.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config NETFILTER_XT_MATCH_LAYER7_DEBUG
++ bool 'Layer 7 debugging output'
++ depends on NETFILTER_XT_MATCH_LAYER7
++ help
++ Say Y to get lots of debugging output.
++
++
+ config NETFILTER_XT_MATCH_STATISTIC
+ tristate '"statistic" match support'
+ depends on NETFILTER_ADVANCED
+--- a/net/netfilter/Makefile
++++ b/net/netfilter/Makefile
+@@ -105,6 +105,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT)
+ obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_SOCKET) += xt_socket.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o
++obj-$(CONFIG_NETFILTER_XT_MATCH_LAYER7) += xt_layer7.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
+--- /dev/null
++++ b/net/netfilter/xt_layer7.c
+@@ -0,0 +1,666 @@
++/*
++ Kernel module to match application layer (OSI layer 7) data in connections.
++
++ http://l7-filter.sf.net
++
++ (C) 2003-2009 Matthew Strait and Ethan Sommer.
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License
++ as published by the Free Software Foundation; either version
++ 2 of the License, or (at your option) any later version.
++ http://www.gnu.org/licenses/gpl.txt
++
++ Based on ipt_string.c (C) 2000 Emmanuel Roger <winfield@freegates.be>,
++ xt_helper.c (C) 2002 Harald Welte and cls_layer7.c (C) 2003 Matthew Strait,
++ Ethan Sommer, Justin Levandoski.
++*/
++
++#include <linux/spinlock.h>
++#include <linux/version.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter.h>
++#include <net/netfilter/nf_conntrack.h>
++#include <net/netfilter/nf_conntrack_core.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++#include <net/netfilter/nf_conntrack_extend.h>
++#include <net/netfilter/nf_conntrack_acct.h>
++#endif
++#include <linux/netfilter/x_tables.h>
++#include <linux/netfilter/xt_layer7.h>
++#include <linux/ctype.h>
++#include <linux/proc_fs.h>
++
++#include "regexp/regexp.c"
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Matthew Strait <quadong@users.sf.net>, Ethan Sommer <sommere@users.sf.net>");
++MODULE_DESCRIPTION("iptables application layer match module");
++MODULE_ALIAS("ipt_layer7");
++MODULE_VERSION("2.21");
++
++static int maxdatalen = 2048; // this is the default
++module_param(maxdatalen, int, 0444);
++MODULE_PARM_DESC(maxdatalen, "maximum bytes of data looked at by l7-filter");
++#ifdef CONFIG_NETFILTER_XT_MATCH_LAYER7_DEBUG
++ #define DPRINTK(format,args...) printk(format,##args)
++#else
++ #define DPRINTK(format,args...)
++#endif
++
++/* Number of packets whose data we look at.
++This can be modified through /proc/net/layer7_numpackets */
++static int num_packets = 10;
++
++static struct pattern_cache {
++ char * regex_string;
++ regexp * pattern;
++ struct pattern_cache * next;
++} * first_pattern_cache = NULL;
++
++DEFINE_SPINLOCK(l7_lock);
++
++static int total_acct_packets(struct nf_conn *ct)
++{
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 26)
++ BUG_ON(ct == NULL);
++ return (ct->counters[IP_CT_DIR_ORIGINAL].packets + ct->counters[IP_CT_DIR_REPLY].packets);
++#else
++ struct nf_conn_counter *acct;
++
++ BUG_ON(ct == NULL);
++ acct = nf_conn_acct_find(ct);
++ if (!acct)
++ return 0;
++ return (atomic64_read(acct[IP_CT_DIR_ORIGINAL].packets) + atomic64_read(acct[IP_CT_DIR_REPLY].packets));
++#endif
++}
++
++#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++/* Converts an unfriendly string into a friendly one by
++replacing unprintables with periods and all whitespace with " ". */
++static char * friendly_print(unsigned char * s)
++{
++ char * f = kmalloc(strlen(s) + 1, GFP_ATOMIC);
++ int i;
++
++ if(!f) {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "friendly_print, bailing.\n");
++ return NULL;
++ }
++
++ for(i = 0; i < strlen(s); i++){
++ if(isprint(s[i]) && s[i] < 128) f[i] = s[i];
++ else if(isspace(s[i])) f[i] = ' ';
++ else f[i] = '.';
++ }
++ f[i] = '\0';
++ return f;
++}
++
++static char dec2hex(int i)
++{
++ switch (i) {
++ case 0 ... 9:
++ return (i + '0');
++ break;
++ case 10 ... 15:
++ return (i - 10 + 'a');
++ break;
++ default:
++ if (net_ratelimit())
++ printk("layer7: Problem in dec2hex\n");
++ return '\0';
++ }
++}
++
++static char * hex_print(unsigned char * s)
++{
++ char * g = kmalloc(strlen(s)*3 + 1, GFP_ATOMIC);
++ int i;
++
++ if(!g) {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in hex_print, "
++ "bailing.\n");
++ return NULL;
++ }
++
++ for(i = 0; i < strlen(s); i++) {
++ g[i*3 ] = dec2hex(s[i]/16);
++ g[i*3 + 1] = dec2hex(s[i]%16);
++ g[i*3 + 2] = ' ';
++ }
++ g[i*3] = '\0';
++
++ return g;
++}
++#endif // DEBUG
++
++/* Use instead of regcomp. As we expect to be seeing the same regexps over and
++over again, it makes sense to cache the results. */
++static regexp * compile_and_cache(const char * regex_string,
++ const char * protocol)
++{
++ struct pattern_cache * node = first_pattern_cache;
++ struct pattern_cache * last_pattern_cache = first_pattern_cache;
++ struct pattern_cache * tmp;
++ unsigned int len;
++
++ while (node != NULL) {
++ if (!strcmp(node->regex_string, regex_string))
++ return node->pattern;
++
++ last_pattern_cache = node;/* points at the last non-NULL node */
++ node = node->next;
++ }
++
++ /* If we reach the end of the list, then we have not yet cached
++ the pattern for this regex. Let's do that now.
++ Be paranoid about running out of memory to avoid list corruption. */
++ tmp = kmalloc(sizeof(struct pattern_cache), GFP_ATOMIC);
++
++ if(!tmp) {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "compile_and_cache, bailing.\n");
++ return NULL;
++ }
++
++ tmp->regex_string = kmalloc(strlen(regex_string) + 1, GFP_ATOMIC);
++ tmp->pattern = kmalloc(sizeof(struct regexp), GFP_ATOMIC);
++ tmp->next = NULL;
++
++ if(!tmp->regex_string || !tmp->pattern) {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "compile_and_cache, bailing.\n");
++ kfree(tmp->regex_string);
++ kfree(tmp->pattern);
++ kfree(tmp);
++ return NULL;
++ }
++
++ /* Ok. The new node is all ready now. */
++ node = tmp;
++
++ if(first_pattern_cache == NULL) /* list is empty */
++ first_pattern_cache = node; /* make node the beginning */
++ else
++ last_pattern_cache->next = node; /* attach node to the end */
++
++ /* copy the string and compile the regex */
++ len = strlen(regex_string);
++ DPRINTK("About to compile this: \"%s\"\n", regex_string);
++ node->pattern = regcomp((char *)regex_string, &len);
++ if ( !node->pattern ) {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: Error compiling regexp "
++ "\"%s\" (%s)\n",
++ regex_string, protocol);
++ /* pattern is now cached as NULL, so we won't try again. */
++ }
++
++ strcpy(node->regex_string, regex_string);
++ return node->pattern;
++}
++
++static int can_handle(const struct sk_buff *skb)
++{
++ if(!ip_hdr(skb)) /* not IP */
++ return 0;
++ if(ip_hdr(skb)->protocol != IPPROTO_TCP &&
++ ip_hdr(skb)->protocol != IPPROTO_UDP &&
++ ip_hdr(skb)->protocol != IPPROTO_ICMP)
++ return 0;
++ return 1;
++}
++
++/* Returns the offset into skb->data at which the application data starts */
++static int app_data_offset(const struct sk_buff *skb)
++{
++ /* In case we are ported somewhere (ebtables?) where ip_hdr(skb)
++ isn't set, this can be gotten from 4*(skb->data[0] & 0x0f) as well. */
++ int ip_hl = 4*ip_hdr(skb)->ihl;
++
++ if( ip_hdr(skb)->protocol == IPPROTO_TCP ) {
++ /* 12 == offset into TCP header for the header length field.
++ Can't get this with skb->h.th->doff because the tcphdr
++ struct doesn't get set when routing (this is confirmed to be
++ true in Netfilter as well as QoS.) */
++ int tcp_hl = 4*(skb->data[ip_hl + 12] >> 4);
++
++ return ip_hl + tcp_hl;
++ } else if( ip_hdr(skb)->protocol == IPPROTO_UDP ) {
++ return ip_hl + 8; /* UDP header is always 8 bytes */
++ } else if( ip_hdr(skb)->protocol == IPPROTO_ICMP ) {
++ return ip_hl + 8; /* ICMP header is 8 bytes */
++ } else {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: tried to handle unknown "
++ "protocol!\n");
++ return ip_hl + 8; /* something reasonable */
++ }
++}
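
The offset arithmetic in app_data_offset() above can be tried outside the kernel: the IPv4 header length comes from the IHL nibble (in 32-bit words), the TCP header length from the data-offset nibble at byte 12 of the TCP header, and UDP/ICMP headers are a fixed 8 bytes. A standalone sketch over a raw packet buffer rather than an sk_buff, not part of the patch; l7_data_offset() is an illustrative name and error handling is omitted.

#include <stdio.h>
#include <stdint.h>

static int l7_data_offset(const uint8_t *pkt)
{
	int ip_hl = 4 * (pkt[0] & 0x0f);	/* IHL is the low nibble of byte 0 */
	uint8_t proto = pkt[9];			/* IPv4 protocol field */

	if (proto == 6)				/* TCP */
		return ip_hl + 4 * (pkt[ip_hl + 12] >> 4);
	return ip_hl + 8;			/* UDP and ICMP: fixed 8-byte header */
}

int main(void)
{
	/* Minimal fake IPv4+TCP header: IHL=5 (20 bytes), proto=6,
	 * TCP data offset=5 (20 bytes) -> payload starts at byte 40. */
	uint8_t pkt[64] = { 0 };

	pkt[0] = 0x45;
	pkt[9] = 6;
	pkt[20 + 12] = 5 << 4;
	printf("payload offset: %d\n", l7_data_offset(pkt));
	return 0;
}
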
++
++/* handles whether there's a match when we aren't appending data anymore */
++static int match_no_append(struct nf_conn * conntrack,
++ struct nf_conn * master_conntrack,
++ enum ip_conntrack_info ctinfo,
++ enum ip_conntrack_info master_ctinfo,
++ const struct xt_layer7_info * info)
++{
++ /* If we're in here, throw the app data away */
++ if(master_conntrack->layer7.app_data != NULL) {
++
++ #ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++ if(!master_conntrack->layer7.app_proto) {
++ char * f =
++ friendly_print(master_conntrack->layer7.app_data);
++ char * g =
++ hex_print(master_conntrack->layer7.app_data);
++ DPRINTK("\nl7-filter gave up after %d bytes "
++ "(%d packets):\n%s\n",
++ strlen(f), total_acct_packets(master_conntrack), f);
++ kfree(f);
++ DPRINTK("In hex: %s\n", g);
++ kfree(g);
++ }
++ #endif
++
++ kfree(master_conntrack->layer7.app_data);
++ master_conntrack->layer7.app_data = NULL; /* don't free again */
++ }
++
++ if(master_conntrack->layer7.app_proto){
++ /* Here child connections set their .app_proto (for /proc) */
++ if(!conntrack->layer7.app_proto) {
++ conntrack->layer7.app_proto =
++ kmalloc(strlen(master_conntrack->layer7.app_proto)+1,
++ GFP_ATOMIC);
++ if(!conntrack->layer7.app_proto){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory "
++ "in match_no_append, "
++ "bailing.\n");
++ return 1;
++ }
++ strcpy(conntrack->layer7.app_proto,
++ master_conntrack->layer7.app_proto);
++ }
++
++ return (!strcmp(master_conntrack->layer7.app_proto,
++ info->protocol));
++ }
++ else {
++ /* If not classified, set to "unknown" to distinguish from
++ connections that are still being tested. */
++ master_conntrack->layer7.app_proto =
++ kmalloc(strlen("unknown")+1, GFP_ATOMIC);
++ if(!master_conntrack->layer7.app_proto){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "match_no_append, bailing.\n");
++ return 1;
++ }
++ strcpy(master_conntrack->layer7.app_proto, "unknown");
++ return 0;
++ }
++}
++
++/* add the new app data to the conntrack. Return number of bytes added. */
++static int add_data(struct nf_conn * master_conntrack,
++ char * app_data, int appdatalen)
++{
++ int length = 0, i;
++ int oldlength = master_conntrack->layer7.app_data_len;
++
++ /* This is a fix for a race condition by Deti Fliegl. However, I'm not
++ clear on whether the race condition exists or whether this really
++ fixes it. I might just be being dense... Anyway, if it's not really
++ a fix, all it does is waste a very small amount of time. */
++ if(!master_conntrack->layer7.app_data) return 0;
++
++ /* Strip nulls. Make everything lower case (our regex lib doesn't
++ do case insensitivity). Add it to the end of the current data. */
++ for(i = 0; i < maxdatalen-oldlength-1 &&
++ i < appdatalen; i++) {
++ if(app_data[i] != '\0') {
++ /* the kernel version of tolower mungs 'upper ascii' */
++ master_conntrack->layer7.app_data[length+oldlength] =
++ isascii(app_data[i])?
++ tolower(app_data[i]) : app_data[i];
++ length++;
++ }
++ }
++
++ master_conntrack->layer7.app_data[length+oldlength] = '\0';
++ master_conntrack->layer7.app_data_len = length + oldlength;
++
++ return length;
++}
++
++/* taken from drivers/video/modedb.c */
++static int my_atoi(const char *s)
++{
++ int val = 0;
++
++ for (;; s++) {
++ switch (*s) {
++ case '0'...'9':
++ val = 10*val+(*s-'0');
++ break;
++ default:
++ return val;
++ }
++ }
++}
++
++/* write out num_packets to userland. */
++static int layer7_read_proc(char* page, char ** start, off_t off, int count,
++ int* eof, void * data)
++{
++ if(num_packets > 99 && net_ratelimit())
++ printk(KERN_ERR "layer7: NOT REACHED. num_packets too big\n");
++
++ page[0] = num_packets/10 + '0';
++ page[1] = num_packets%10 + '0';
++ page[2] = '\n';
++ page[3] = '\0';
++
++ *eof=1;
++
++ return 3;
++}
++
++/* Read in num_packets from userland */
++static int layer7_write_proc(struct file* file, const char* buffer,
++ unsigned long count, void *data)
++{
++ char * foo = kmalloc(count, GFP_ATOMIC);
++
++ if(!foo){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory, bailing. "
++ "num_packets unchanged.\n");
++ return count;
++ }
++
++ if(copy_from_user(foo, buffer, count)) {
++ return -EFAULT;
++ }
++
++
++ num_packets = my_atoi(foo);
++ kfree (foo);
++
++ /* This has an arbitrary limit to make the math easier. I'm lazy.
++ But anyway, 99 is a LOT! If you want more, you're doing it wrong! */
++ if(num_packets > 99) {
++ printk(KERN_WARNING "layer7: num_packets can't be > 99.\n");
++ num_packets = 99;
++ } else if(num_packets < 1) {
++ printk(KERN_WARNING "layer7: num_packets can't be < 1.\n");
++ num_packets = 1;
++ }
++
++ return count;
++}
++
++static bool
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++match(const struct sk_buff *skbin, const struct xt_match_param *par)
++#else
++match(const struct sk_buff *skbin,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct xt_match *match,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ bool *hotdrop)
++#endif
++{
++ /* sidestep const without getting a compiler warning... */
++ struct sk_buff * skb = (struct sk_buff *)skbin;
++
++ const struct xt_layer7_info * info =
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ par->matchinfo;
++ #else
++ matchinfo;
++ #endif
++
++ enum ip_conntrack_info master_ctinfo, ctinfo;
++ struct nf_conn *master_conntrack, *conntrack;
++ unsigned char * app_data;
++ unsigned int pattern_result, appdatalen;
++ regexp * comppattern;
++
++ /* Be paranoid/incompetent - lock the entire match function. */
++ spin_lock_bh(&l7_lock);
++
++ if(!can_handle(skb)){
++ DPRINTK("layer7: This is some protocol I can't handle.\n");
++ spin_unlock_bh(&l7_lock);
++ return info->invert;
++ }
++
++ /* Treat parent & all its children together as one connection, except
++ for the purpose of setting conntrack->layer7.app_proto in the actual
++ connection. This makes /proc/net/ip_conntrack more satisfying. */
++ if(!(conntrack = nf_ct_get(skb, &ctinfo)) ||
++ !(master_conntrack=nf_ct_get(skb,&master_ctinfo))){
++ DPRINTK("layer7: couldn't get conntrack.\n");
++ spin_unlock_bh(&l7_lock);
++ return info->invert;
++ }
++
++ /* Try to get a master conntrack (and its master etc) for FTP, etc. */
++ while (master_ct(master_conntrack) != NULL)
++ master_conntrack = master_ct(master_conntrack);
++
++ /* if we've classified it or seen too many packets */
++ if(total_acct_packets(master_conntrack) > num_packets ||
++ master_conntrack->layer7.app_proto) {
++
++ pattern_result = match_no_append(conntrack, master_conntrack,
++ ctinfo, master_ctinfo, info);
++
++ /* skb->cb[0] == seen. Don't do things twice if there are
++ multiple l7 rules. I'm not sure that using cb for this purpose
++ is correct, even though it says "put your private variables
++ there". But it doesn't look like it is being used for anything
++ else in the skbs that make it here. */
++ skb->cb[0] = 1; /* marking it seen here's probably irrelevant */
++
++ spin_unlock_bh(&l7_lock);
++ return (pattern_result ^ info->invert);
++ }
++
++ if(skb_is_nonlinear(skb)){
++ if(skb_linearize(skb) != 0){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: failed to linearize "
++ "packet, bailing.\n");
++ spin_unlock_bh(&l7_lock);
++ return info->invert;
++ }
++ }
++
++ /* now that the skb is linearized, it's safe to set these. */
++ app_data = skb->data + app_data_offset(skb);
++ appdatalen = skb_tail_pointer(skb) - app_data;
++
++ /* the return value gets checked later, when we're ready to use it */
++ comppattern = compile_and_cache(info->pattern, info->protocol);
++
++ /* On the first packet of a connection, allocate space for app data */
++ if(total_acct_packets(master_conntrack) == 1 && !skb->cb[0] &&
++ !master_conntrack->layer7.app_data){
++ master_conntrack->layer7.app_data =
++ kmalloc(maxdatalen, GFP_ATOMIC);
++ if(!master_conntrack->layer7.app_data){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "match, bailing.\n");
++ spin_unlock_bh(&l7_lock);
++ return info->invert;
++ }
++
++ master_conntrack->layer7.app_data[0] = '\0';
++ }
++
++ /* Can be here, but unallocated, if numpackets is increased near
++ the beginning of a connection */
++ if(master_conntrack->layer7.app_data == NULL){
++ spin_unlock_bh(&l7_lock);
++ return info->invert; /* unmatched */
++ }
++
++ if(!skb->cb[0]){
++ int newbytes;
++ newbytes = add_data(master_conntrack, app_data, appdatalen);
++
++ if(newbytes == 0) { /* didn't add any data */
++ skb->cb[0] = 1;
++ /* Didn't match before, not going to match now */
++ spin_unlock_bh(&l7_lock);
++ return info->invert;
++ }
++ }
++
++ /* If looking for "unknown", then never match. "Unknown" means that
++ we've given up; we're still trying with these packets. */
++ if(!strcmp(info->protocol, "unknown")) {
++ pattern_result = 0;
++ /* If looking for "unset", then always match. "Unset" means that we
++ haven't yet classified the connection. */
++ } else if(!strcmp(info->protocol, "unset")) {
++ pattern_result = 2;
++ DPRINTK("layer7: matched unset: not yet classified "
++ "(%d/%d packets)\n",
++ total_acct_packets(master_conntrack), num_packets);
++ /* If the regexp failed to compile, don't bother running it */
++ } else if(comppattern &&
++ regexec(comppattern, master_conntrack->layer7.app_data)){
++ DPRINTK("layer7: matched %s\n", info->protocol);
++ pattern_result = 1;
++ } else pattern_result = 0;
++
++ if(pattern_result == 1) {
++ master_conntrack->layer7.app_proto =
++ kmalloc(strlen(info->protocol)+1, GFP_ATOMIC);
++ if(!master_conntrack->layer7.app_proto){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "match, bailing.\n");
++ spin_unlock_bh(&l7_lock);
++ return (pattern_result ^ info->invert);
++ }
++ strcpy(master_conntrack->layer7.app_proto, info->protocol);
++ } else if(pattern_result > 1) { /* cleanup from "unset" */
++ pattern_result = 1;
++ }
++
++ /* mark the packet seen */
++ skb->cb[0] = 1;
++
++ spin_unlock_bh(&l7_lock);
++ return (pattern_result ^ info->invert);
++}
++
++// load nf_conntrack_ipv4
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++static bool check(const struct xt_mtchk_param *par)
++{
++ if (nf_ct_l3proto_try_module_get(par->match->family) < 0) {
++ printk(KERN_WARNING "can't load conntrack support for "
++ "proto=%d\n", par->match->family);
++#else
++static bool check(const char *tablename, const void *inf,
++ const struct xt_match *match, void *matchinfo,
++ unsigned int hook_mask)
++{
++ if (nf_ct_l3proto_try_module_get(match->family) < 0) {
++ printk(KERN_WARNING "can't load conntrack support for "
++ "proto=%d\n", match->family);
++#endif
++ return 0;
++ }
++ return 1;
++}
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ static void destroy(const struct xt_mtdtor_param *par)
++ {
++ nf_ct_l3proto_module_put(par->match->family);
++ }
++#else
++ static void destroy(const struct xt_match *match, void *matchinfo)
++ {
++ nf_ct_l3proto_module_put(match->family);
++ }
++#endif
++
++static struct xt_match xt_layer7_match[] __read_mostly = {
++{
++ .name = "layer7",
++ .family = AF_INET,
++ .checkentry = check,
++ .match = match,
++ .destroy = destroy,
++ .matchsize = sizeof(struct xt_layer7_info),
++ .me = THIS_MODULE
++}
++};
++
++static void layer7_cleanup_proc(void)
++{
++ remove_proc_entry("layer7_numpackets", init_net.proc_net);
++}
++
++/* register the proc file */
++static void layer7_init_proc(void)
++{
++ struct proc_dir_entry* entry;
++ entry = create_proc_entry("layer7_numpackets", 0644, init_net.proc_net);
++ entry->read_proc = layer7_read_proc;
++ entry->write_proc = layer7_write_proc;
++}
++
++static int __init xt_layer7_init(void)
++{
++ need_conntrack();
++
++ layer7_init_proc();
++ if(maxdatalen < 1) {
++ printk(KERN_WARNING "layer7: maxdatalen can't be < 1, "
++ "using 1\n");
++ maxdatalen = 1;
++ }
++ /* This is not a hard limit. It's just here to prevent people from
++ bringing their slow machines to a grinding halt. */
++ else if(maxdatalen > 65536) {
++ printk(KERN_WARNING "layer7: maxdatalen can't be > 65536, "
++ "using 65536\n");
++ maxdatalen = 65536;
++ }
++ return xt_register_matches(xt_layer7_match,
++ ARRAY_SIZE(xt_layer7_match));
++}
++
++static void __exit xt_layer7_fini(void)
++{
++ layer7_cleanup_proc();
++ xt_unregister_matches(xt_layer7_match, ARRAY_SIZE(xt_layer7_match));
++}
++
++module_init(xt_layer7_init);
++module_exit(xt_layer7_fini);
+--- /dev/null
++++ b/net/netfilter/regexp/regexp.c
+@@ -0,0 +1,1197 @@
++/*
++ * regcomp and regexec -- regsub and regerror are elsewhere
++ * @(#)regexp.c 1.3 of 18 April 87
++ *
++ * Copyright (c) 1986 by University of Toronto.
++ * Written by Henry Spencer. Not derived from licensed software.
++ *
++ * Permission is granted to anyone to use this software for any
++ * purpose on any computer system, and to redistribute it freely,
++ * subject to the following restrictions:
++ *
++ * 1. The author is not responsible for the consequences of use of
++ * this software, no matter how awful, even if they arise
++ * from defects in it.
++ *
++ * 2. The origin of this software must not be misrepresented, either
++ * by explicit claim or by omission.
++ *
++ * 3. Altered versions must be plainly marked as such, and must not
++ * be misrepresented as being the original software.
++ *
++ * Beware that some of this code is subtly aware of the way operator
++ * precedence is structured in regular expressions. Serious changes in
++ * regular-expression syntax might require a total rethink.
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ * Modified slightly by Matthew Strait to use more modern C.
++ */
++
++#include "regexp.h"
++#include "regmagic.h"
++
++/* added by ethan and matt. Lets it work in both kernel and user space.
++(So iptables can use it, for instance.) Yea, it goes both ways... */
++#if __KERNEL__
++ #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++#else
++ #define printk(format,args...) printf(format,##args)
++#endif
++
++void regerror(char * s)
++{
++ printk("<3>Regexp: %s\n", s);
++ /* NOTREACHED */
++}
++
++/*
++ * The "internal use only" fields in regexp.h are present to pass info from
++ * compile to execute that permits the execute phase to run lots faster on
++ * simple cases. They are:
++ *
++ * regstart char that must begin a match; '\0' if none obvious
++ * reganch is the match anchored (at beginning-of-line only)?
++ * regmust string (pointer into program) that match must include, or NULL
++ * regmlen length of regmust string
++ *
++ * Regstart and reganch permit very fast decisions on suitable starting points
++ * for a match, cutting down the work a lot. Regmust permits fast rejection
++ * of lines that cannot possibly match. The regmust tests are costly enough
++ * that regcomp() supplies a regmust only if the r.e. contains something
++ * potentially expensive (at present, the only such thing detected is * or +
++ * at the start of the r.e., which can involve a lot of backup). Regmlen is
++ * supplied because the test in regexec() needs it and regcomp() is computing
++ * it anyway.
++ */
++
++/*
++ * Structure for regexp "program". This is essentially a linear encoding
++ * of a nondeterministic finite-state machine (aka syntax charts or
++ * "railroad normal form" in parsing technology). Each node is an opcode
++ * plus a "next" pointer, possibly plus an operand. "Next" pointers of
++ * all nodes except BRANCH implement concatenation; a "next" pointer with
++ * a BRANCH on both ends of it is connecting two alternatives. (Here we
++ * have one of the subtle syntax dependencies: an individual BRANCH (as
++ * opposed to a collection of them) is never concatenated with anything
++ * because of operator precedence.) The operand of some types of node is
++ * a literal string; for others, it is a node leading into a sub-FSM. In
++ * particular, the operand of a BRANCH node is the first node of the branch.
++ * (NB this is *not* a tree structure: the tail of the branch connects
++ * to the thing following the set of BRANCHes.) The opcodes are:
++ */
++
++/* definition number opnd? meaning */
++#define END 0 /* no End of program. */
++#define BOL 1 /* no Match "" at beginning of line. */
++#define EOL 2 /* no Match "" at end of line. */
++#define ANY 3 /* no Match any one character. */
++#define ANYOF 4 /* str Match any character in this string. */
++#define ANYBUT 5 /* str Match any character not in this string. */
++#define BRANCH 6 /* node Match this alternative, or the next... */
++#define BACK 7 /* no Match "", "next" ptr points backward. */
++#define EXACTLY 8 /* str Match this string. */
++#define NOTHING 9 /* no Match empty string. */
++#define STAR 10 /* node Match this (simple) thing 0 or more times. */
++#define PLUS 11 /* node Match this (simple) thing 1 or more times. */
++#define OPEN 20 /* no Mark this point in input as start of #n. */
++ /* OPEN+1 is number 1, etc. */
++#define CLOSE 30 /* no Analogous to OPEN. */
++
++/*
++ * Opcode notes:
++ *
++ * BRANCH The set of branches constituting a single choice are hooked
++ * together with their "next" pointers, since precedence prevents
++ * anything being concatenated to any individual branch. The
++ * "next" pointer of the last BRANCH in a choice points to the
++ * thing following the whole choice. This is also where the
++ * final "next" pointer of each individual branch points; each
++ * branch starts with the operand node of a BRANCH node.
++ *
++ * BACK Normal "next" pointers all implicitly point forward; BACK
++ * exists to make loop structures possible.
++ *
++ * STAR,PLUS '?', and complex '*' and '+', are implemented as circular
++ * BRANCH structures using BACK. Simple cases (one character
++ * per match) are implemented with STAR and PLUS for speed
++ * and to minimize recursive plunges.
++ *
++ * OPEN,CLOSE ...are numbered at compile time.
++ */
++
++/*
++ * A node is one char of opcode followed by two chars of "next" pointer.
++ * "Next" pointers are stored as two 8-bit pieces, high order first. The
++ * value is a positive offset from the opcode of the node containing it.
++ * An operand, if any, simply follows the node. (Note that much of the
++ * code generation knows about this implicit relationship.)
++ *
++ * Using two bytes for the "next" pointer is vast overkill for most things,
++ * but allows patterns to get big without disasters.
++ */
++#define OP(p) (*(p))
++#define NEXT(p) (((*((p)+1)&0377)<<8) + (*((p)+2)&0377))
++#define OPERAND(p) ((p) + 3)
++
++/*
++ * See regmagic.h for one further detail of program structure.
++ */
++
++
++/*
++ * Utility definitions.
++ */
++#ifndef CHARBITS
++#define UCHARAT(p) ((int)*(unsigned char *)(p))
++#else
++#define UCHARAT(p) ((int)*(p)&CHARBITS)
++#endif
++
++#define FAIL(m) { regerror(m); return(NULL); }
++#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?')
++#define META "^$.[()|?+*\\"
++
++/*
++ * Flags to be passed up and down.
++ */
++#define HASWIDTH 01 /* Known never to match null string. */
++#define SIMPLE 02 /* Simple enough to be STAR/PLUS operand. */
++#define SPSTART 04 /* Starts with * or +. */
++#define WORST 0 /* Worst case. */
++
++/*
++ * Global work variables for regcomp().
++ */
++struct match_globals {
++char *reginput; /* String-input pointer. */
++char *regbol; /* Beginning of input, for ^ check. */
++char **regstartp; /* Pointer to startp array. */
++char **regendp; /* Ditto for endp. */
++char *regparse; /* Input-scan pointer. */
++int regnpar; /* () count. */
++char regdummy;
++char *regcode; /* Code-emit pointer; &regdummy = don't. */
++long regsize; /* Code size. */
++};
++
++/*
++ * Forward declarations for regcomp()'s friends.
++ */
++#ifndef STATIC
++#define STATIC static
++#endif
++STATIC char *reg(struct match_globals *g, int paren,int *flagp);
++STATIC char *regbranch(struct match_globals *g, int *flagp);
++STATIC char *regpiece(struct match_globals *g, int *flagp);
++STATIC char *regatom(struct match_globals *g, int *flagp);
++STATIC char *regnode(struct match_globals *g, char op);
++STATIC char *regnext(struct match_globals *g, char *p);
++STATIC void regc(struct match_globals *g, char b);
++STATIC void reginsert(struct match_globals *g, char op, char *opnd);
++STATIC void regtail(struct match_globals *g, char *p, char *val);
++STATIC void regoptail(struct match_globals *g, char *p, char *val);
++
++
++__kernel_size_t my_strcspn(const char *s1,const char *s2)
++{
++ char *scan1;
++ char *scan2;
++ int count;
++
++ count = 0;
++ for (scan1 = (char *)s1; *scan1 != '\0'; scan1++) {
++ for (scan2 = (char *)s2; *scan2 != '\0';) /* ++ moved down. */
++ if (*scan1 == *scan2++)
++ return(count);
++ count++;
++ }
++ return(count);
++}
++
++/*
++ - regcomp - compile a regular expression into internal code
++ *
++ * We can't allocate space until we know how big the compiled form will be,
++ * but we can't compile it (and thus know how big it is) until we've got a
++ * place to put the code. So we cheat: we compile it twice, once with code
++ * generation turned off and size counting turned on, and once "for real".
++ * This also means that we don't allocate space until we are sure that the
++ * thing really will compile successfully, and we never have to move the
++ * code and thus invalidate pointers into it. (Note that it has to be in
++ * one piece because free() must be able to free it all.)
++ *
++ * Beware that the optimization-preparation code in here knows about some
++ * of the structure of the compiled regexp.
++ */
++regexp *
++regcomp(char *exp,int *patternsize)
++{
++ register regexp *r;
++ register char *scan;
++ register char *longest;
++ register int len;
++ int flags;
++ struct match_globals g;
++
++ /* commented out by ethan
++ extern char *malloc();
++ */
++
++ if (exp == NULL)
++ FAIL("NULL argument");
++
++ /* First pass: determine size, legality. */
++ g.regparse = exp;
++ g.regnpar = 1;
++ g.regsize = 0L;
++ g.regcode = &g.regdummy;
++ regc(&g, MAGIC);
++ if (reg(&g, 0, &flags) == NULL)
++ return(NULL);
++
++ /* Small enough for pointer-storage convention? */
++ if (g.regsize >= 32767L) /* Probably could be 65535L. */
++ FAIL("regexp too big");
++
++ /* Allocate space. */
++ *patternsize=sizeof(regexp) + (unsigned)g.regsize;
++ r = (regexp *)malloc(sizeof(regexp) + (unsigned)g.regsize);
++ if (r == NULL)
++ FAIL("out of space");
++
++ /* Second pass: emit code. */
++ g.regparse = exp;
++ g.regnpar = 1;
++ g.regcode = r->program;
++ regc(&g, MAGIC);
++ if (reg(&g, 0, &flags) == NULL)
++ return(NULL);
++
++ /* Dig out information for optimizations. */
++ r->regstart = '\0'; /* Worst-case defaults. */
++ r->reganch = 0;
++ r->regmust = NULL;
++ r->regmlen = 0;
++ scan = r->program+1; /* First BRANCH. */
++ if (OP(regnext(&g, scan)) == END) { /* Only one top-level choice. */
++ scan = OPERAND(scan);
++
++ /* Starting-point info. */
++ if (OP(scan) == EXACTLY)
++ r->regstart = *OPERAND(scan);
++ else if (OP(scan) == BOL)
++ r->reganch++;
++
++ /*
++ * If there's something expensive in the r.e., find the
++ * longest literal string that must appear and make it the
++ * regmust. Resolve ties in favor of later strings, since
++ * the regstart check works with the beginning of the r.e.
++ * and avoiding duplication strengthens checking. Not a
++ * strong reason, but sufficient in the absence of others.
++ */
++ if (flags&SPSTART) {
++ longest = NULL;
++ len = 0;
++ for (; scan != NULL; scan = regnext(&g, scan))
++ if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) {
++ longest = OPERAND(scan);
++ len = strlen(OPERAND(scan));
++ }
++ r->regmust = longest;
++ r->regmlen = len;
++ }
++ }
++
++ return(r);
++}
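
The comment above regcomp() describes its two-pass scheme: a first pass that "emits" into a dummy byte while only counting, an allocation of exactly that size, then a second pass that emits for real. Below is a minimal sketch of the same trick with a trivial byte emitter; the emitter/compile names are made up, while regcomp() itself routes this through g->regcode versus &g->regdummy.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct emitter {
	char *code;	/* NULL during the counting pass */
	char dummy;
	long size;
};

static void emit(struct emitter *e, char b)
{
	if (e->code)
		*e->code++ = b;	/* real pass: write and advance */
	else
		e->dummy = b;	/* counting pass: throw the byte away */
	e->size++;
}

static char *compile(const char *src)
{
	struct emitter e = { .code = NULL, .size = 0 };
	char *out;
	long i, n;

	for (i = 0; src[i]; i++)	/* pass 1: size only */
		emit(&e, src[i]);
	n = e.size;

	out = malloc(n + 1);
	if (!out)
		return NULL;

	e.code = out;			/* pass 2: emit for real */
	e.size = 0;
	for (i = 0; src[i]; i++)
		emit(&e, src[i]);
	out[n] = '\0';
	return out;
}

int main(void)
{
	char *p = compile("a(b|c)*d");

	if (p) {
		printf("%s\n", p);
		free(p);
	}
	return 0;
}
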
++
++/*
++ - reg - regular expression, i.e. main body or parenthesized thing
++ *
++ * Caller must absorb opening parenthesis.
++ *
++ * Combining parenthesis handling with the base level of regular expression
++ * is a trifle forced, but the need to tie the tails of the branches to what
++ * follows makes it hard to avoid.
++ */
++static char *
++reg(struct match_globals *g, int paren, int *flagp /* Parenthesized? */ )
++{
++ register char *ret;
++ register char *br;
++ register char *ender;
++ register int parno = 0; /* 0 makes gcc happy */
++ int flags;
++
++ *flagp = HASWIDTH; /* Tentatively. */
++
++ /* Make an OPEN node, if parenthesized. */
++ if (paren) {
++ if (g->regnpar >= NSUBEXP)
++ FAIL("too many ()");
++ parno = g->regnpar;
++ g->regnpar++;
++ ret = regnode(g, OPEN+parno);
++ } else
++ ret = NULL;
++
++ /* Pick up the branches, linking them together. */
++ br = regbranch(g, &flags);
++ if (br == NULL)
++ return(NULL);
++ if (ret != NULL)
++ regtail(g, ret, br); /* OPEN -> first. */
++ else
++ ret = br;
++ if (!(flags&HASWIDTH))
++ *flagp &= ~HASWIDTH;
++ *flagp |= flags&SPSTART;
++ while (*g->regparse == '|') {
++ g->regparse++;
++ br = regbranch(g, &flags);
++ if (br == NULL)
++ return(NULL);
++ regtail(g, ret, br); /* BRANCH -> BRANCH. */
++ if (!(flags&HASWIDTH))
++ *flagp &= ~HASWIDTH;
++ *flagp |= flags&SPSTART;
++ }
++
++ /* Make a closing node, and hook it on the end. */
++ ender = regnode(g, (paren) ? CLOSE+parno : END);
++ regtail(g, ret, ender);
++
++ /* Hook the tails of the branches to the closing node. */
++ for (br = ret; br != NULL; br = regnext(g, br))
++ regoptail(g, br, ender);
++
++ /* Check for proper termination. */
++ if (paren && *g->regparse++ != ')') {
++ FAIL("unmatched ()");
++ } else if (!paren && *g->regparse != '\0') {
++ if (*g->regparse == ')') {
++ FAIL("unmatched ()");
++ } else
++ FAIL("junk on end"); /* "Can't happen". */
++ /* NOTREACHED */
++ }
++
++ return(ret);
++}
++
++/*
++ - regbranch - one alternative of an | operator
++ *
++ * Implements the concatenation operator.
++ */
++static char *
++regbranch(struct match_globals *g, int *flagp)
++{
++ register char *ret;
++ register char *chain;
++ register char *latest;
++ int flags;
++
++ *flagp = WORST; /* Tentatively. */
++
++ ret = regnode(g, BRANCH);
++ chain = NULL;
++ while (*g->regparse != '\0' && *g->regparse != '|' && *g->regparse != ')') {
++ latest = regpiece(g, &flags);
++ if (latest == NULL)
++ return(NULL);
++ *flagp |= flags&HASWIDTH;
++ if (chain == NULL) /* First piece. */
++ *flagp |= flags&SPSTART;
++ else
++ regtail(g, chain, latest);
++ chain = latest;
++ }
++ if (chain == NULL) /* Loop ran zero times. */
++ (void) regnode(g, NOTHING);
++
++ return(ret);
++}
++
++/*
++ - regpiece - something followed by possible [*+?]
++ *
++ * Note that the branching code sequences used for ? and the general cases
++ * of * and + are somewhat optimized: they use the same NOTHING node as
++ * both the endmarker for their branch list and the body of the last branch.
++ * It might seem that this node could be dispensed with entirely, but the
++ * endmarker role is not redundant.
++ */
++static char *
++regpiece(struct match_globals *g, int *flagp)
++{
++ register char *ret;
++ register char op;
++ register char *next;
++ int flags;
++
++ ret = regatom(g, &flags);
++ if (ret == NULL)
++ return(NULL);
++
++ op = *g->regparse;
++ if (!ISMULT(op)) {
++ *flagp = flags;
++ return(ret);
++ }
++
++ if (!(flags&HASWIDTH) && op != '?')
++ FAIL("*+ operand could be empty");
++ *flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH);
++
++ if (op == '*' && (flags&SIMPLE))
++ reginsert(g, STAR, ret);
++ else if (op == '*') {
++ /* Emit x* as (x&|), where & means "self". */
++ reginsert(g, BRANCH, ret); /* Either x */
++ regoptail(g, ret, regnode(g, BACK)); /* and loop */
++ regoptail(g, ret, ret); /* back */
++ regtail(g, ret, regnode(g, BRANCH)); /* or */
++ regtail(g, ret, regnode(g, NOTHING)); /* null. */
++ } else if (op == '+' && (flags&SIMPLE))
++ reginsert(g, PLUS, ret);
++ else if (op == '+') {
++ /* Emit x+ as x(&|), where & means "self". */
++ next = regnode(g, BRANCH); /* Either */
++ regtail(g, ret, next);
++ regtail(g, regnode(g, BACK), ret); /* loop back */
++ regtail(g, next, regnode(g, BRANCH)); /* or */
++ regtail(g, ret, regnode(g, NOTHING)); /* null. */
++ } else if (op == '?') {
++ /* Emit x? as (x|) */
++ reginsert(g, BRANCH, ret); /* Either x */
++ regtail(g, ret, regnode(g, BRANCH)); /* or */
++ next = regnode(g, NOTHING); /* null. */
++ regtail(g, ret, next);
++ regoptail(g, ret, next);
++ }
++ g->regparse++;
++ if (ISMULT(*g->regparse))
++ FAIL("nested *?+");
++
++ return(ret);
++}
++
++/*
++ - regatom - the lowest level
++ *
++ * Optimization: gobbles an entire sequence of ordinary characters so that
++ * it can turn them into a single node, which is smaller to store and
++ * faster to run. Backslashed characters are exceptions, each becoming a
++ * separate node; the code is simpler that way and it's not worth fixing.
++ */
++static char *
++regatom(struct match_globals *g, int *flagp)
++{
++ register char *ret;
++ int flags;
++
++ *flagp = WORST; /* Tentatively. */
++
++ switch (*g->regparse++) {
++ case '^':
++ ret = regnode(g, BOL);
++ break;
++ case '$':
++ ret = regnode(g, EOL);
++ break;
++ case '.':
++ ret = regnode(g, ANY);
++ *flagp |= HASWIDTH|SIMPLE;
++ break;
++ case '[': {
++ register int class;
++ register int classend;
++
++ if (*g->regparse == '^') { /* Complement of range. */
++ ret = regnode(g, ANYBUT);
++ g->regparse++;
++ } else
++ ret = regnode(g, ANYOF);
++ if (*g->regparse == ']' || *g->regparse == '-')
++ regc(g, *g->regparse++);
++ while (*g->regparse != '\0' && *g->regparse != ']') {
++ if (*g->regparse == '-') {
++ g->regparse++;
++ if (*g->regparse == ']' || *g->regparse == '\0')
++ regc(g, '-');
++ else {
++ class = UCHARAT(g->regparse-2)+1;
++ classend = UCHARAT(g->regparse);
++ if (class > classend+1)
++ FAIL("invalid [] range");
++ for (; class <= classend; class++)
++ regc(g, class);
++ g->regparse++;
++ }
++ } else
++ regc(g, *g->regparse++);
++ }
++ regc(g, '\0');
++ if (*g->regparse != ']')
++ FAIL("unmatched []");
++ g->regparse++;
++ *flagp |= HASWIDTH|SIMPLE;
++ }
++ break;
++ case '(':
++ ret = reg(g, 1, &flags);
++ if (ret == NULL)
++ return(NULL);
++ *flagp |= flags&(HASWIDTH|SPSTART);
++ break;
++ case '\0':
++ case '|':
++ case ')':
++ FAIL("internal urp"); /* Supposed to be caught earlier. */
++ break;
++ case '?':
++ case '+':
++ case '*':
++ FAIL("?+* follows nothing");
++ break;
++ case '\\':
++ if (*g->regparse == '\0')
++ FAIL("trailing \\");
++ ret = regnode(g, EXACTLY);
++ regc(g, *g->regparse++);
++ regc(g, '\0');
++ *flagp |= HASWIDTH|SIMPLE;
++ break;
++ default: {
++ register int len;
++ register char ender;
++
++ g->regparse--;
++ len = my_strcspn((const char *)g->regparse, (const char *)META);
++ if (len <= 0)
++ FAIL("internal disaster");
++ ender = *(g->regparse+len);
++ if (len > 1 && ISMULT(ender))
++ len--; /* Back off clear of ?+* operand. */
++ *flagp |= HASWIDTH;
++ if (len == 1)
++ *flagp |= SIMPLE;
++ ret = regnode(g, EXACTLY);
++ while (len > 0) {
++ regc(g, *g->regparse++);
++ len--;
++ }
++ regc(g, '\0');
++ }
++ break;
++ }
++
++ return(ret);
++}
++
++/*
++ - regnode - emit a node
++ */
++static char * /* Location. */
++regnode(struct match_globals *g, char op)
++{
++ register char *ret;
++ register char *ptr;
++
++ ret = g->regcode;
++ if (ret == &g->regdummy) {
++ g->regsize += 3;
++ return(ret);
++ }
++
++ ptr = ret;
++ *ptr++ = op;
++ *ptr++ = '\0'; /* Null "next" pointer. */
++ *ptr++ = '\0';
++ g->regcode = ptr;
++
++ return(ret);
++}
++
++/*
++ - regc - emit (if appropriate) a byte of code
++ */
++static void
++regc(struct match_globals *g, char b)
++{
++ if (g->regcode != &g->regdummy)
++ *g->regcode++ = b;
++ else
++ g->regsize++;
++}
++
++/*
++ - reginsert - insert an operator in front of already-emitted operand
++ *
++ * Means relocating the operand.
++ */
++static void
++reginsert(struct match_globals *g, char op, char* opnd)
++{
++ register char *src;
++ register char *dst;
++ register char *place;
++
++ if (g->regcode == &g->regdummy) {
++ g->regsize += 3;
++ return;
++ }
++
++ src = g->regcode;
++ g->regcode += 3;
++ dst = g->regcode;
++ while (src > opnd)
++ *--dst = *--src;
++
++ place = opnd; /* Op node, where operand used to be. */
++ *place++ = op;
++ *place++ = '\0';
++ *place++ = '\0';
++}
++
++/*
++ - regtail - set the next-pointer at the end of a node chain
++ */
++static void
++regtail(struct match_globals *g, char *p, char *val)
++{
++ register char *scan;
++ register char *temp;
++ register int offset;
++
++ if (p == &g->regdummy)
++ return;
++
++ /* Find last node. */
++ scan = p;
++ for (;;) {
++ temp = regnext(g, scan);
++ if (temp == NULL)
++ break;
++ scan = temp;
++ }
++
++ if (OP(scan) == BACK)
++ offset = scan - val;
++ else
++ offset = val - scan;
++ *(scan+1) = (offset>>8)&0377;
++ *(scan+2) = offset&0377;
++}
++
++/*
++ - regoptail - regtail on operand of first argument; nop if operandless
++ */
++static void
++regoptail(struct match_globals *g, char *p, char *val)
++{
++ /* "Operandless" and "op != BRANCH" are synonymous in practice. */
++ if (p == NULL || p == &g->regdummy || OP(p) != BRANCH)
++ return;
++ regtail(g, OPERAND(p), val);
++}
++
++/*
++ * regexec and friends
++ */
++
++
++/*
++ * Forwards.
++ */
++STATIC int regtry(struct match_globals *g, regexp *prog, char *string);
++STATIC int regmatch(struct match_globals *g, char *prog);
++STATIC int regrepeat(struct match_globals *g, char *p);
++
++#ifdef DEBUG
++int regnarrate = 0;
++void regdump();
++STATIC char *regprop(char *op);
++#endif
++
++/*
++ - regexec - match a regexp against a string
++ */
++int
++regexec(regexp *prog, char *string)
++{
++ register char *s;
++ struct match_globals g;
++
++ /* Be paranoid... */
++ if (prog == NULL || string == NULL) {
++ printk("<3>Regexp: NULL parameter\n");
++ return(0);
++ }
++
++ /* Check validity of program. */
++ if (UCHARAT(prog->program) != MAGIC) {
++ printk("<3>Regexp: corrupted program\n");
++ return(0);
++ }
++
++ /* If there is a "must appear" string, look for it. */
++ if (prog->regmust != NULL) {
++ s = string;
++ while ((s = strchr(s, prog->regmust[0])) != NULL) {
++ if (strncmp(s, prog->regmust, prog->regmlen) == 0)
++ break; /* Found it. */
++ s++;
++ }
++ if (s == NULL) /* Not present. */
++ return(0);
++ }
++
++ /* Mark beginning of line for ^ . */
++ g.regbol = string;
++
++ /* Simplest case: anchored match need be tried only once. */
++ if (prog->reganch)
++ return(regtry(&g, prog, string));
++
++ /* Messy cases: unanchored match. */
++ s = string;
++ if (prog->regstart != '\0')
++ /* We know what char it must start with. */
++ while ((s = strchr(s, prog->regstart)) != NULL) {
++ if (regtry(&g, prog, s))
++ return(1);
++ s++;
++ }
++ else
++ /* We don't -- general case. */
++ do {
++ if (regtry(&g, prog, s))
++ return(1);
++ } while (*s++ != '\0');
++
++ /* Failure. */
++ return(0);
++}
++
++/*
++ - regtry - try match at specific point
++ */
++static int /* 0 failure, 1 success */
++regtry(struct match_globals *g, regexp *prog, char *string)
++{
++ register int i;
++ register char **sp;
++ register char **ep;
++
++ g->reginput = string;
++ g->regstartp = prog->startp;
++ g->regendp = prog->endp;
++
++ sp = prog->startp;
++ ep = prog->endp;
++ for (i = NSUBEXP; i > 0; i--) {
++ *sp++ = NULL;
++ *ep++ = NULL;
++ }
++ if (regmatch(g, prog->program + 1)) {
++ prog->startp[0] = string;
++ prog->endp[0] = g->reginput;
++ return(1);
++ } else
++ return(0);
++}
++
++/*
++ - regmatch - main matching routine
++ *
++ * Conceptually the strategy is simple: check to see whether the current
++ * node matches, call self recursively to see whether the rest matches,
++ * and then act accordingly. In practice we make some effort to avoid
++ * recursion, in particular by going through "ordinary" nodes (that don't
++ * need to know whether the rest of the match failed) by a loop instead of
++ * by recursion.
++ */
++static int /* 0 failure, 1 success */
++regmatch(struct match_globals *g, char *prog)
++{
++ register char *scan = prog; /* Current node. */
++ char *next; /* Next node. */
++
++#ifdef DEBUG
++ if (scan != NULL && regnarrate)
++ fprintf(stderr, "%s(\n", regprop(scan));
++#endif
++ while (scan != NULL) {
++#ifdef DEBUG
++ if (regnarrate)
++ fprintf(stderr, "%s...\n", regprop(scan));
++#endif
++ next = regnext(g, scan);
++
++ switch (OP(scan)) {
++ case BOL:
++ if (g->reginput != g->regbol)
++ return(0);
++ break;
++ case EOL:
++ if (*g->reginput != '\0')
++ return(0);
++ break;
++ case ANY:
++ if (*g->reginput == '\0')
++ return(0);
++ g->reginput++;
++ break;
++ case EXACTLY: {
++ register int len;
++ register char *opnd;
++
++ opnd = OPERAND(scan);
++ /* Inline the first character, for speed. */
++ if (*opnd != *g->reginput)
++ return(0);
++ len = strlen(opnd);
++ if (len > 1 && strncmp(opnd, g->reginput, len) != 0)
++ return(0);
++ g->reginput += len;
++ }
++ break;
++ case ANYOF:
++ if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) == NULL)
++ return(0);
++ g->reginput++;
++ break;
++ case ANYBUT:
++ if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) != NULL)
++ return(0);
++ g->reginput++;
++ break;
++ case NOTHING:
++ case BACK:
++ break;
++ case OPEN+1:
++ case OPEN+2:
++ case OPEN+3:
++ case OPEN+4:
++ case OPEN+5:
++ case OPEN+6:
++ case OPEN+7:
++ case OPEN+8:
++ case OPEN+9: {
++ register int no;
++ register char *save;
++
++ no = OP(scan) - OPEN;
++ save = g->reginput;
++
++ if (regmatch(g, next)) {
++ /*
++ * Don't set startp if some later
++ * invocation of the same parentheses
++ * already has.
++ */
++ if (g->regstartp[no] == NULL)
++ g->regstartp[no] = save;
++ return(1);
++ } else
++ return(0);
++ }
++ break;
++ case CLOSE+1:
++ case CLOSE+2:
++ case CLOSE+3:
++ case CLOSE+4:
++ case CLOSE+5:
++ case CLOSE+6:
++ case CLOSE+7:
++ case CLOSE+8:
++ case CLOSE+9:
++ {
++ register int no;
++ register char *save;
++
++ no = OP(scan) - CLOSE;
++ save = g->reginput;
++
++ if (regmatch(g, next)) {
++ /*
++ * Don't set endp if some later
++ * invocation of the same parentheses
++ * already has.
++ */
++ if (g->regendp[no] == NULL)
++ g->regendp[no] = save;
++ return(1);
++ } else
++ return(0);
++ }
++ break;
++ case BRANCH: {
++ register char *save;
++
++ if (OP(next) != BRANCH) /* No choice. */
++ next = OPERAND(scan); /* Avoid recursion. */
++ else {
++ do {
++ save = g->reginput;
++ if (regmatch(g, OPERAND(scan)))
++ return(1);
++ g->reginput = save;
++ scan = regnext(g, scan);
++ } while (scan != NULL && OP(scan) == BRANCH);
++ return(0);
++ /* NOTREACHED */
++ }
++ }
++ break;
++ case STAR:
++ case PLUS: {
++ register char nextch;
++ register int no;
++ register char *save;
++ register int min;
++
++ /*
++ * Lookahead to avoid useless match attempts
++ * when we know what character comes next.
++ */
++ nextch = '\0';
++ if (OP(next) == EXACTLY)
++ nextch = *OPERAND(next);
++ min = (OP(scan) == STAR) ? 0 : 1;
++ save = g->reginput;
++ no = regrepeat(g, OPERAND(scan));
++ while (no >= min) {
++ /* If it could work, try it. */
++ if (nextch == '\0' || *g->reginput == nextch)
++ if (regmatch(g, next))
++ return(1);
++ /* Couldn't or didn't -- back up. */
++ no--;
++ g->reginput = save + no;
++ }
++ return(0);
++ }
++ break;
++ case END:
++ return(1); /* Success! */
++ break;
++ default:
++ printk("<3>Regexp: memory corruption\n");
++ return(0);
++ break;
++ }
++
++ scan = next;
++ }
++
++ /*
++ * We get here only if there's trouble -- normally "case END" is
++ * the terminating point.
++ */
++ printk("<3>Regexp: corrupted pointers\n");
++ return(0);
++}
++
++/*
++ - regrepeat - repeatedly match something simple, report how many
++ */
++static int
++regrepeat(struct match_globals *g, char *p)
++{
++ register int count = 0;
++ register char *scan;
++ register char *opnd;
++
++ scan = g->reginput;
++ opnd = OPERAND(p);
++ switch (OP(p)) {
++ case ANY:
++ count = strlen(scan);
++ scan += count;
++ break;
++ case EXACTLY:
++ while (*opnd == *scan) {
++ count++;
++ scan++;
++ }
++ break;
++ case ANYOF:
++ while (*scan != '\0' && strchr(opnd, *scan) != NULL) {
++ count++;
++ scan++;
++ }
++ break;
++ case ANYBUT:
++ while (*scan != '\0' && strchr(opnd, *scan) == NULL) {
++ count++;
++ scan++;
++ }
++ break;
++ default: /* Oh dear. Called inappropriately. */
++ printk("<3>Regexp: internal foulup\n");
++ count = 0; /* Best compromise. */
++ break;
++ }
++ g->reginput = scan;
++
++ return(count);
++}
++
++/*
++ - regnext - dig the "next" pointer out of a node
++ */
++static char*
++regnext(struct match_globals *g, char *p)
++{
++ register int offset;
++
++ if (p == &g->regdummy)
++ return(NULL);
++
++ offset = NEXT(p);
++ if (offset == 0)
++ return(NULL);
++
++ if (OP(p) == BACK)
++ return(p-offset);
++ else
++ return(p+offset);
++}
++
++#ifdef DEBUG
++
++STATIC char *regprop();
++
++/*
++ - regdump - dump a regexp onto stdout in vaguely comprehensible form
++ */
++void
++regdump(regexp *r)
++{
++ register char *s;
++ register char op = EXACTLY; /* Arbitrary non-END op. */
++ register char *next;
++ /* extern char *strchr(); */
++
++
++ s = r->program + 1;
++ while (op != END) { /* While that wasn't END last time... */
++ op = OP(s);
++ printf("%2d%s", s-r->program, regprop(s)); /* Where, what. */
++ next = regnext(s);
++ if (next == NULL) /* Next ptr. */
++ printf("(0)");
++ else
++ printf("(%d)", (s-r->program)+(next-s));
++ s += 3;
++ if (op == ANYOF || op == ANYBUT || op == EXACTLY) {
++ /* Literal string, where present. */
++ while (*s != '\0') {
++ putchar(*s);
++ s++;
++ }
++ s++;
++ }
++ putchar('\n');
++ }
++
++ /* Header fields of interest. */
++ if (r->regstart != '\0')
++ printf("start `%c' ", r->regstart);
++ if (r->reganch)
++ printf("anchored ");
++ if (r->regmust != NULL)
++ printf("must have \"%s\"", r->regmust);
++ printf("\n");
++}
++
++/*
++ - regprop - printable representation of opcode
++ */
++static char *
++regprop(char *op)
++{
++#define BUFLEN 50
++ register char *p;
++ static char buf[BUFLEN];
++
++ strcpy(buf, ":");
++
++ switch (OP(op)) {
++ case BOL:
++ p = "BOL";
++ break;
++ case EOL:
++ p = "EOL";
++ break;
++ case ANY:
++ p = "ANY";
++ break;
++ case ANYOF:
++ p = "ANYOF";
++ break;
++ case ANYBUT:
++ p = "ANYBUT";
++ break;
++ case BRANCH:
++ p = "BRANCH";
++ break;
++ case EXACTLY:
++ p = "EXACTLY";
++ break;
++ case NOTHING:
++ p = "NOTHING";
++ break;
++ case BACK:
++ p = "BACK";
++ break;
++ case END:
++ p = "END";
++ break;
++ case OPEN+1:
++ case OPEN+2:
++ case OPEN+3:
++ case OPEN+4:
++ case OPEN+5:
++ case OPEN+6:
++ case OPEN+7:
++ case OPEN+8:
++ case OPEN+9:
++ snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "OPEN%d", OP(op)-OPEN);
++ p = NULL;
++ break;
++ case CLOSE+1:
++ case CLOSE+2:
++ case CLOSE+3:
++ case CLOSE+4:
++ case CLOSE+5:
++ case CLOSE+6:
++ case CLOSE+7:
++ case CLOSE+8:
++ case CLOSE+9:
++ snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "CLOSE%d", OP(op)-CLOSE);
++ p = NULL;
++ break;
++ case STAR:
++ p = "STAR";
++ break;
++ case PLUS:
++ p = "PLUS";
++ break;
++ default:
++ printk("<3>Regexp: corrupted opcode\n");
++ break;
++ }
++ if (p != NULL)
++ strncat(buf, p, BUFLEN-strlen(buf));
++ return(buf);
++}
++#endif
++
++
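For reference, a minimal standalone sketch (not part of the patch) of the node layout the routines above emit and walk: regnode() writes one opcode byte plus a two-byte "next" field, regtail() fills that field in big-endian order, and regnext() follows it (subtracting instead for BACK nodes). The NEXT() macro below is reconstructed from what regtail() stores, and the opcode values are arbitrary placeholders.

/* Standalone illustration only: decodes the 3-byte node header used by the
 * compiled program, i.e. an opcode byte followed by a big-endian 16-bit
 * "next" offset (offset 0 terminates the chain; BACK nodes would subtract). */
#include <stdio.h>

#define OP(p)   (*(p))
#define NEXT(p) ((((p)[1] & 0377) << 8) + ((p)[2] & 0377))

int main(void)
{
    /* Two hand-built nodes: the first (placeholder opcode 1) carries the
     * literal "ab" and points 6 bytes ahead; the second (opcode 2) ends
     * the chain with a zero offset. */
    unsigned char prog[] = { 1, 0, 6, 'a', 'b', '\0', 2, 0, 0 };
    unsigned char *p = prog;

    while (p != NULL) {
        int off = NEXT(p);
        printf("node at %ld: op=%d next-offset=%d\n",
               (long)(p - prog), OP(p), off);
        p = off ? p + off : NULL;
    }
    return 0;
}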
+--- /dev/null
++++ b/net/netfilter/regexp/regexp.h
+@@ -0,0 +1,41 @@
++/*
++ * Definitions etc. for regexp(3) routines.
++ *
++ * Caveat: this is V8 regexp(3) [actually, a reimplementation thereof],
++ * not the System V one.
++ */
++
++#ifndef REGEXP_H
++#define REGEXP_H
++
++
++/*
++http://www.opensource.apple.com/darwinsource/10.3/expect-1/expect/expect.h ,
++which contains a version of this library, says:
++
++ *
++ * NSUBEXP must be at least 10, and no greater than 117 or the parser
++ * will not work properly.
++ *
++
++However, it looks rather like this library is limited to 10. If you think
++otherwise, let us know.
++*/
++
++#define NSUBEXP 10
++typedef struct regexp {
++ char *startp[NSUBEXP];
++ char *endp[NSUBEXP];
++ char regstart; /* Internal use only. */
++ char reganch; /* Internal use only. */
++ char *regmust; /* Internal use only. */
++ int regmlen; /* Internal use only. */
++ char program[1]; /* Unwarranted chumminess with compiler. */
++} regexp;
++
++regexp * regcomp(char *exp, int *patternsize);
++int regexec(regexp *prog, char *string);
++void regsub(regexp *prog, char *source, char *dest);
++void regerror(char *s);
++
++#endif
+--- /dev/null
++++ b/net/netfilter/regexp/regmagic.h
+@@ -0,0 +1,5 @@
++/*
++ * The first byte of the regexp internal "program" is actually this magic
++ * number; the start node begins in the second byte.
++ */
++#define MAGIC 0234
+--- /dev/null
++++ b/net/netfilter/regexp/regsub.c
+@@ -0,0 +1,95 @@
++/*
++ * regsub
++ * @(#)regsub.c 1.3 of 2 April 86
++ *
++ * Copyright (c) 1986 by University of Toronto.
++ * Written by Henry Spencer. Not derived from licensed software.
++ *
++ * Permission is granted to anyone to use this software for any
++ * purpose on any computer system, and to redistribute it freely,
++ * subject to the following restrictions:
++ *
++ * 1. The author is not responsible for the consequences of use of
++ * this software, no matter how awful, even if they arise
++ * from defects in it.
++ *
++ * 2. The origin of this software must not be misrepresented, either
++ * by explicit claim or by omission.
++ *
++ * 3. Altered versions must be plainly marked as such, and must not
++ * be misrepresented as being the original software.
++ *
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ */
++#include "regexp.h"
++#include "regmagic.h"
++#include <linux/string.h>
++
++
++#ifndef CHARBITS
++#define UCHARAT(p) ((int)*(unsigned char *)(p))
++#else
++#define UCHARAT(p) ((int)*(p)&CHARBITS)
++#endif
++
++#if 0
++//void regerror(char * s)
++//{
++// printk("regexp(3): %s", s);
++// /* NOTREACHED */
++//}
++#endif
++
++/*
++ - regsub - perform substitutions after a regexp match
++ */
++void
++regsub(regexp * prog, char * source, char * dest)
++{
++ register char *src;
++ register char *dst;
++ register char c;
++ register int no;
++ register int len;
++
++ /* Not necessary and gcc doesn't like it -MLS */
++ /*extern char *strncpy();*/
++
++ if (prog == NULL || source == NULL || dest == NULL) {
++ regerror("NULL parm to regsub");
++ return;
++ }
++ if (UCHARAT(prog->program) != MAGIC) {
++ regerror("damaged regexp fed to regsub");
++ return;
++ }
++
++ src = source;
++ dst = dest;
++ while ((c = *src++) != '\0') {
++ if (c == '&')
++ no = 0;
++ else if (c == '\\' && '0' <= *src && *src <= '9')
++ no = *src++ - '0';
++ else
++ no = -1;
++
++ if (no < 0) { /* Ordinary character. */
++ if (c == '\\' && (*src == '\\' || *src == '&'))
++ c = *src++;
++ *dst++ = c;
++ } else if (prog->startp[no] != NULL && prog->endp[no] != NULL) {
++ len = prog->endp[no] - prog->startp[no];
++ (void) strncpy(dst, prog->startp[no], len);
++ dst += len;
++ if (len != 0 && *(dst-1) == '\0') { /* strncpy hit NUL. */
++ regerror("damaged match string");
++ return;
++ }
++ }
++ }
++ *dst++ = '\0';
++}
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -214,6 +214,14 @@ destroy_conntrack(struct nf_conntrack *n
+ * too. */
+ nf_ct_remove_expectations(ct);
+
++ #if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
++ if(ct->layer7.app_proto)
++ kfree(ct->layer7.app_proto);
++ if(ct->layer7.app_data)
++ kfree(ct->layer7.app_data);
++ #endif
++
++
+ /* We overload first tuple to link into unconfirmed list. */
+ if (!nf_ct_is_confirmed(ct)) {
+ BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -239,6 +239,12 @@ static int ct_seq_show(struct seq_file *
+ if (ct_show_delta_time(s, ct))
+ goto release;
+
++#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
++ if(ct->layer7.app_proto &&
++ seq_printf(s, "l7proto=%s ", ct->layer7.app_proto))
++ return -ENOSPC;
++#endif
++
+ if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
+ goto release;
+
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -134,6 +134,22 @@ struct nf_conn {
+ struct net *ct_net;
+ #endif
+
++#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || \
++ defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
++ struct {
++ /*
++ * e.g. "http". NULL before decision. "unknown" after decision
++ * if no match.
++ */
++ char *app_proto;
++ /*
++ * application layer data so far. NULL after match decision.
++ */
++ char *app_data;
++ unsigned int app_data_len;
++ } layer7;
++#endif
++
+ /* Storage reserved for other modules, must be the last member */
+ union nf_conntrack_proto proto;
+ };
+--- /dev/null
++++ b/include/linux/netfilter/xt_layer7.h
+@@ -0,0 +1,13 @@
++#ifndef _XT_LAYER7_H
++#define _XT_LAYER7_H
++
++#define MAX_PATTERN_LEN 8192
++#define MAX_PROTOCOL_LEN 256
++
++struct xt_layer7_info {
++ char protocol[MAX_PROTOCOL_LEN];
++ char pattern[MAX_PATTERN_LEN];
++ u_int8_t invert;
++};
++
++#endif /* _XT_LAYER7_H */
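As a rough illustration of how the structure above is filled in from the userspace side (which is not part of this patch), here is a hedged standalone sketch; the struct mirrors xt_layer7_info from the header above, and the regular expression is only a placeholder, not a real l7-protocols pattern.

/* Illustration only: the shape of the data the xt_layer7 match receives.
 * Struct copied from xt_layer7.h above; the pattern text is a placeholder. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_PATTERN_LEN  8192
#define MAX_PROTOCOL_LEN 256

struct xt_layer7_info {
    char    protocol[MAX_PROTOCOL_LEN];
    char    pattern[MAX_PATTERN_LEN];
    uint8_t invert;
};

int main(void)
{
    struct xt_layer7_info info;

    memset(&info, 0, sizeof(info));
    strncpy(info.protocol, "http", sizeof(info.protocol) - 1);
    strncpy(info.pattern, "http/(0\\.9|1\\.0|1\\.1)", sizeof(info.pattern) - 1);
    info.invert = 0;    /* 0 = match, 1 = negate the match */

    printf("proto=%s invert=%u pattern=%s\n",
           info.protocol, info.invert, info.pattern);
    return 0;
}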
diff --git a/target/linux/generic/patches-3.3/601-netfilter_layer7_pktmatch.patch b/target/linux/generic/patches-3.3/601-netfilter_layer7_pktmatch.patch
new file mode 100644
index 0000000..f65e301
--- /dev/null
+++ b/target/linux/generic/patches-3.3/601-netfilter_layer7_pktmatch.patch
@@ -0,0 +1,108 @@
+--- a/include/linux/netfilter/xt_layer7.h
++++ b/include/linux/netfilter/xt_layer7.h
+@@ -8,6 +8,7 @@ struct xt_layer7_info {
+ char protocol[MAX_PROTOCOL_LEN];
+ char pattern[MAX_PATTERN_LEN];
+ u_int8_t invert;
++ u_int8_t pkt;
+ };
+
+ #endif /* _XT_LAYER7_H */
+--- a/net/netfilter/xt_layer7.c
++++ b/net/netfilter/xt_layer7.c
+@@ -314,33 +314,35 @@ static int match_no_append(struct nf_con
+ }
+
+ /* add the new app data to the conntrack. Return number of bytes added. */
+-static int add_data(struct nf_conn * master_conntrack,
+- char * app_data, int appdatalen)
++static int add_datastr(char *target, int offset, char *app_data, int len)
+ {
+ int length = 0, i;
+- int oldlength = master_conntrack->layer7.app_data_len;
+-
+- /* This is a fix for a race condition by Deti Fliegl. However, I'm not
+- clear on whether the race condition exists or whether this really
+- fixes it. I might just be being dense... Anyway, if it's not really
+- a fix, all it does is waste a very small amount of time. */
+- if(!master_conntrack->layer7.app_data) return 0;
++ if (!target) return 0;
+
+ /* Strip nulls. Make everything lower case (our regex lib doesn't
+ do case insensitivity). Add it to the end of the current data. */
+- for(i = 0; i < maxdatalen-oldlength-1 &&
+- i < appdatalen; i++) {
++ for(i = 0; i < maxdatalen-offset-1 && i < len; i++) {
+ if(app_data[i] != '\0') {
+ /* the kernel version of tolower mungs 'upper ascii' */
+- master_conntrack->layer7.app_data[length+oldlength] =
++ target[length+offset] =
+ isascii(app_data[i])?
+ tolower(app_data[i]) : app_data[i];
+ length++;
+ }
+ }
++ target[length+offset] = '\0';
++
++ return length;
++}
++
++/* add the new app data to the conntrack. Return number of bytes added. */
++static int add_data(struct nf_conn * master_conntrack,
++ char * app_data, int appdatalen)
++{
++ int length;
+
+- master_conntrack->layer7.app_data[length+oldlength] = '\0';
+- master_conntrack->layer7.app_data_len = length + oldlength;
++ length = add_datastr(master_conntrack->layer7.app_data, master_conntrack->layer7.app_data_len, app_data, appdatalen);
++ master_conntrack->layer7.app_data_len += length;
+
+ return length;
+ }
+@@ -438,7 +440,7 @@ match(const struct sk_buff *skbin,
+
+ enum ip_conntrack_info master_ctinfo, ctinfo;
+ struct nf_conn *master_conntrack, *conntrack;
+- unsigned char * app_data;
++ unsigned char *app_data, *tmp_data;
+ unsigned int pattern_result, appdatalen;
+ regexp * comppattern;
+
+@@ -466,8 +468,8 @@ match(const struct sk_buff *skbin,
+ master_conntrack = master_ct(master_conntrack);
+
+ /* if we've classified it or seen too many packets */
+- if(total_acct_packets(master_conntrack) > num_packets ||
+- master_conntrack->layer7.app_proto) {
++ if(!info->pkt && (total_acct_packets(master_conntrack) > num_packets ||
++ master_conntrack->layer7.app_proto)) {
+
+ pattern_result = match_no_append(conntrack, master_conntrack,
+ ctinfo, master_ctinfo, info);
+@@ -500,6 +502,25 @@ match(const struct sk_buff *skbin,
+ /* the return value gets checked later, when we're ready to use it */
+ comppattern = compile_and_cache(info->pattern, info->protocol);
+
++ if (info->pkt) {
++ tmp_data = kmalloc(maxdatalen, GFP_ATOMIC);
++ if(!tmp_data){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
++ return info->invert;
++ }
++
++ tmp_data[0] = '\0';
++ add_datastr(tmp_data, 0, app_data, appdatalen);
++ pattern_result = ((comppattern && regexec(comppattern, tmp_data)) ? 1 : 0);
++
++ kfree(tmp_data);
++ tmp_data = NULL;
++ spin_unlock_bh(&l7_lock);
++
++ return (pattern_result ^ info->invert);
++ }
++
+ /* On the first packet of a connection, allocate space for app data */
+ if(total_acct_packets(master_conntrack) == 1 && !skb->cb[0] &&
+ !master_conntrack->layer7.app_data){
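The hunk above centers on add_datastr(), which normalizes payload bytes before regex matching: NUL bytes are dropped and ASCII is lower-cased, since the compiled patterns are lowercase-only. Below is a standalone userspace rendition of that normalization, with an explicit ASCII test standing in for the kernel's isascii().

/* Userspace sketch of the normalization add_datastr() performs: copy up to
 * a bounded number of payload bytes, skipping NULs and lower-casing ASCII,
 * then NUL-terminate.  The < 0x80 test stands in for the kernel's isascii(). */
#include <ctype.h>
#include <stdio.h>

static int add_datastr(char *target, int offset, const char *app_data,
                       int len, int maxdatalen)
{
    int length = 0, i;

    if (!target)
        return 0;

    for (i = 0; i < maxdatalen - offset - 1 && i < len; i++) {
        if (app_data[i] != '\0') {
            unsigned char c = (unsigned char)app_data[i];
            target[length + offset] = (c < 0x80) ? (char)tolower(c) : (char)c;
            length++;
        }
    }
    target[length + offset] = '\0';
    return length;
}

int main(void)
{
    char buf[64];
    const char payload[] = { 'G', 'E', 'T', ' ', '\0', '/', 'A' };
    int n = add_datastr(buf, 0, payload, (int)sizeof(payload), (int)sizeof(buf));

    printf("added %d bytes: \"%s\"\n", n, buf);  /* prints: added 6 bytes: "get /a" */
    return 0;
}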
diff --git a/target/linux/generic/patches-3.3/602-netfilter_layer7_match.patch b/target/linux/generic/patches-3.3/602-netfilter_layer7_match.patch
new file mode 100644
index 0000000..b2e48c8
--- /dev/null
+++ b/target/linux/generic/patches-3.3/602-netfilter_layer7_match.patch
@@ -0,0 +1,51 @@
+--- a/net/netfilter/xt_layer7.c
++++ b/net/netfilter/xt_layer7.c
+@@ -415,7 +415,9 @@ static int layer7_write_proc(struct file
+ }
+
+ static bool
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
++match(const struct sk_buff *skbin, struct xt_action_param *par)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+ match(const struct sk_buff *skbin, const struct xt_match_param *par)
+ #else
+ match(const struct sk_buff *skbin,
+@@ -597,14 +599,19 @@ match(const struct sk_buff *skbin,
+ }
+
+ // load nf_conntrack_ipv4
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
++static int
++#else
++static bool
++#endif
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+-static bool check(const struct xt_mtchk_param *par)
++check(const struct xt_mtchk_param *par)
+ {
+ if (nf_ct_l3proto_try_module_get(par->match->family) < 0) {
+ printk(KERN_WARNING "can't load conntrack support for "
+ "proto=%d\n", par->match->family);
+ #else
+-static bool check(const char *tablename, const void *inf,
++check(const char *tablename, const void *inf,
+ const struct xt_match *match, void *matchinfo,
+ unsigned int hook_mask)
+ {
+@@ -612,9 +619,15 @@ static bool check(const char *tablename,
+ printk(KERN_WARNING "can't load conntrack support for "
+ "proto=%d\n", match->family);
+ #endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
++ return -EINVAL;
++ }
++ return 0;
++#else
+ return 0;
+ }
+ return 1;
++#endif
+ }
+
+
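The patch above is preprocessor plumbing around kernel API changes; the comparisons work because KERNEL_VERSION() packs (major, minor, patch) into a single integer that LINUX_VERSION_CODE can be compared against at compile time. A small standalone sketch of that arithmetic follows; the macro is reproduced here for illustration, while in-tree it comes from <linux/version.h>.

/* Sketch of the version gating used in the patch: pack the release numbers
 * into one integer so they compare numerically. */
#include <stdio.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
    unsigned int v2_6_35 = KERNEL_VERSION(2, 6, 35);
    unsigned int v3_3_0  = KERNEL_VERSION(3, 3, 0);

    /* 3.3 is newer than 2.6.35, so the xt_action_param branch of the
     * #if in the patch is the one that gets compiled on this kernel. */
    printf("2.6.35 -> %#x, 3.3.0 -> %#x, newer: %s\n",
           v2_6_35, v3_3_0, v3_3_0 >= v2_6_35 ? "yes" : "no");
    return 0;
}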
diff --git a/target/linux/generic/patches-3.3/603-netfilter_layer7_2.6.36_fix.patch b/target/linux/generic/patches-3.3/603-netfilter_layer7_2.6.36_fix.patch
new file mode 100644
index 0000000..92a7200
--- /dev/null
+++ b/target/linux/generic/patches-3.3/603-netfilter_layer7_2.6.36_fix.patch
@@ -0,0 +1,61 @@
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -857,6 +857,27 @@ config NETFILTER_XT_MATCH_IPVS
+
+ If unsure, say N.
+
++config NETFILTER_XT_MATCH_LAYER7
++ tristate '"layer7" match support'
++ depends on EXPERIMENTAL
++ depends on NETFILTER_XTABLES
++ depends on NETFILTER_ADVANCED
++ depends on NF_CONNTRACK
++ help
++ Say Y if you want to be able to classify connections (and their
++ packets) based on regular expression matching of their application
++ layer data. This is one way to classify applications such as
++ peer-to-peer filesharing systems that do not always use the same
++ port.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config NETFILTER_XT_MATCH_LAYER7_DEBUG
++ bool 'Layer 7 debugging output'
++ depends on NETFILTER_XT_MATCH_LAYER7
++ help
++ Say Y to get lots of debugging output.
++
+ config NETFILTER_XT_MATCH_LENGTH
+ tristate '"length" match support'
+ depends on NETFILTER_ADVANCED
+@@ -1053,26 +1074,11 @@ config NETFILTER_XT_MATCH_STATE
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+-config NETFILTER_XT_MATCH_LAYER7
+- tristate '"layer7" match support'
+- depends on NETFILTER_XTABLES
+- depends on EXPERIMENTAL && (IP_NF_CONNTRACK || NF_CONNTRACK)
+- depends on NETFILTER_ADVANCED
+- help
+- Say Y if you want to be able to classify connections (and their
+- packets) based on regular expression matching of their application
+- layer data. This is one way to classify applications such as
+- peer-to-peer filesharing systems that do not always use the same
+- port.
+-
+- To compile it as a module, choose M here. If unsure, say N.
+-
+ config NETFILTER_XT_MATCH_LAYER7_DEBUG
+- bool 'Layer 7 debugging output'
+- depends on NETFILTER_XT_MATCH_LAYER7
+- help
+- Say Y to get lots of debugging output.
+-
++ bool 'Layer 7 debugging output'
++ depends on NETFILTER_XT_MATCH_LAYER7
++ help
++ Say Y to get lots of debugging output.
+
+ config NETFILTER_XT_MATCH_STATISTIC
+ tristate '"statistic" match support'
diff --git a/target/linux/generic/patches-3.3/604-netfilter_cisco_794x_iphone.patch b/target/linux/generic/patches-3.3/604-netfilter_cisco_794x_iphone.patch
new file mode 100644
index 0000000..662a499
--- /dev/null
+++ b/target/linux/generic/patches-3.3/604-netfilter_cisco_794x_iphone.patch
@@ -0,0 +1,118 @@
+--- a/include/linux/netfilter/nf_conntrack_sip.h
++++ b/include/linux/netfilter/nf_conntrack_sip.h
+@@ -2,12 +2,15 @@
+ #define __NF_CONNTRACK_SIP_H__
+ #ifdef __KERNEL__
+
++#include <linux/types.h>
++
+ #define SIP_PORT 5060
+ #define SIP_TIMEOUT 3600
+
+ struct nf_ct_sip_master {
+ unsigned int register_cseq;
+ unsigned int invite_cseq;
++ __be16 forced_dport;
+ };
+
+ enum sip_expectation_classes {
+--- a/net/ipv4/netfilter/nf_nat_sip.c
++++ b/net/ipv4/netfilter/nf_nat_sip.c
+@@ -73,6 +73,7 @@ static int map_addr(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
++ struct nf_conn_help *help = nfct_help(ct);
+ char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+ unsigned int buflen;
+ __be32 newaddr;
+@@ -85,7 +86,8 @@ static int map_addr(struct sk_buff *skb,
+ } else if (ct->tuplehash[dir].tuple.dst.u3.ip == addr->ip &&
+ ct->tuplehash[dir].tuple.dst.u.udp.port == port) {
+ newaddr = ct->tuplehash[!dir].tuple.src.u3.ip;
+- newport = ct->tuplehash[!dir].tuple.src.u.udp.port;
++ newport = help->help.ct_sip_info.forced_dport ? :
++ ct->tuplehash[!dir].tuple.src.u.udp.port;
+ } else
+ return 1;
+
+@@ -121,6 +123,7 @@ static unsigned int ip_nat_sip(struct sk
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
++ struct nf_conn_help *help = nfct_help(ct);
+ unsigned int coff, matchoff, matchlen;
+ enum sip_header_types hdr;
+ union nf_inet_addr addr;
+@@ -229,6 +232,20 @@ next:
+ !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO))
+ return NF_DROP;
+
++ /* Mangle destination port for Cisco phones, then fix up checksums */
++ if (dir == IP_CT_DIR_REPLY && help->help.ct_sip_info.forced_dport) {
++ struct udphdr *uh;
++
++ if (!skb_make_writable(skb, skb->len))
++ return NF_DROP;
++
++ uh = (struct udphdr *)(skb->data + ip_hdrlen(skb));
++ uh->dest = help->help.ct_sip_info.forced_dport;
++
++ if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, 0, 0, NULL, 0))
++ return NF_DROP;
++ }
++
+ return NF_ACCEPT;
+ }
+
+@@ -280,8 +297,10 @@ static unsigned int ip_nat_sip_expect(st
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
++ struct nf_conn_help *help = nfct_help(ct);
+ __be32 newip;
+ u_int16_t port;
++ __be16 srcport;
+ char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+ unsigned buflen;
+
+@@ -294,8 +313,9 @@ static unsigned int ip_nat_sip_expect(st
+ /* If the signalling port matches the connection's source port in the
+ * original direction, try to use the destination port in the opposite
+ * direction. */
+- if (exp->tuple.dst.u.udp.port ==
+- ct->tuplehash[dir].tuple.src.u.udp.port)
++ srcport = help->help.ct_sip_info.forced_dport ? :
++ ct->tuplehash[dir].tuple.src.u.udp.port;
++ if (exp->tuple.dst.u.udp.port == srcport)
+ port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port);
+ else
+ port = ntohs(exp->tuple.dst.u.udp.port);
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -1363,8 +1363,25 @@ static int process_sip_request(struct sk
+ {
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
++ struct nf_conn_help *help = nfct_help(ct);
++ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ unsigned int matchoff, matchlen;
+ unsigned int cseq, i;
++ union nf_inet_addr addr;
++ __be16 port;
++
++ /* Many Cisco IP phones use a high source port for SIP requests, but
++ * listen for the response on port 5060. If we are the local
++ * router for one of these phones, save the port number from the
++ * Via: header so that nf_nat_sip can redirect the responses to
++ * the correct port.
++ */
++ if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
++ SIP_HDR_VIA_UDP, NULL, &matchoff,
++ &matchlen, &addr, &port) > 0 &&
++ port != ct->tuplehash[dir].tuple.src.u.udp.port &&
++ nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3))
++ help->help.ct_sip_info.forced_dport = port;
+
+ for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
+ const struct sip_handler *handler;
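The comment in the last hunk states the intent: remember the port from the Via: header (forced_dport) and steer SIP responses there instead of to the connection's ephemeral source port. A standalone sketch of just that port-selection rule is below; the port numbers are examples.

/* Sketch of the selection rule the patch adds: if a forced destination port
 * was learned from the Via: header, responses go there, otherwise the
 * connection's own source port is used (the patch spells this with GNU C's
 * "?:" shorthand). */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static uint16_t reply_dport(uint16_t forced_dport, uint16_t tuple_src_port)
{
    return forced_dport ? forced_dport : tuple_src_port;
}

int main(void)
{
    uint16_t via_port = htons(5060);   /* port learned from the Via: header */
    uint16_t src_port = htons(49170);  /* high ephemeral source port */

    printf("no Via port:   reply to %u\n", ntohs(reply_dport(0, src_port)));
    printf("with Via port: reply to %u\n", ntohs(reply_dport(via_port, src_port)));
    return 0;
}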
diff --git a/target/linux/generic/patches-3.3/610-netfilter_match_bypass_default_checks.patch b/target/linux/generic/patches-3.3/610-netfilter_match_bypass_default_checks.patch
new file mode 100644
index 0000000..51c9e09
--- /dev/null
+++ b/target/linux/generic/patches-3.3/610-netfilter_match_bypass_default_checks.patch
@@ -0,0 +1,93 @@
+--- a/include/linux/netfilter_ipv4/ip_tables.h
++++ b/include/linux/netfilter_ipv4/ip_tables.h
+@@ -93,6 +93,7 @@ struct ipt_ip {
+ #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
+ #define IPT_F_GOTO 0x02 /* Set if jump is a goto */
+ #define IPT_F_MASK 0x03 /* All possible flag bits mask. */
++#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */
+
+ /* Values for "inv" field in struct ipt_ip. */
+ #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -81,6 +81,9 @@ ip_packet_match(const struct iphdr *ip,
+
+ #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
+
++ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
++ return true;
++
+ if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
+ IPT_INV_SRCIP) ||
+ FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
+@@ -134,6 +137,29 @@ ip_packet_match(const struct iphdr *ip,
+ return true;
+ }
+
++static void
++ip_checkdefault(struct ipt_ip *ip)
++{
++ static const char iface_mask[IFNAMSIZ] = {};
++
++ if (ip->invflags || ip->flags & IPT_F_FRAG)
++ return;
++
++ if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0)
++ return;
++
++ if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0)
++ return;
++
++ if (ip->smsk.s_addr || ip->dmsk.s_addr)
++ return;
++
++ if (ip->proto)
++ return;
++
++ ip->flags |= IPT_F_NO_DEF_MATCH;
++}
++
+ static bool
+ ip_checkentry(const struct ipt_ip *ip)
+ {
+@@ -561,7 +587,7 @@ static void cleanup_match(struct xt_entr
+ }
+
+ static int
+-check_entry(const struct ipt_entry *e, const char *name)
++check_entry(struct ipt_entry *e, const char *name)
+ {
+ const struct xt_entry_target *t;
+
+@@ -570,6 +596,8 @@ check_entry(const struct ipt_entry *e, c
+ return -EINVAL;
+ }
+
++ ip_checkdefault(&e->ip);
++
+ if (e->target_offset + sizeof(struct xt_entry_target) >
+ e->next_offset)
+ return -EINVAL;
+@@ -931,6 +959,7 @@ copy_entries_to_user(unsigned int total_
+ const struct xt_table_info *private = table->private;
+ int ret = 0;
+ const void *loc_cpu_entry;
++ u8 flags;
+
+ counters = alloc_counters(table);
+ if (IS_ERR(counters))
+@@ -961,6 +990,14 @@ copy_entries_to_user(unsigned int total_
+ ret = -EFAULT;
+ goto free_counters;
+ }
++
++ flags = e->ip.flags & IPT_F_MASK;
++ if (copy_to_user(userptr + off
++ + offsetof(struct ipt_entry, ip.flags),
++ &flags, sizeof(flags)) != 0) {
++ ret = -EFAULT;
++ goto free_counters;
++ }
+
+ for (i = sizeof(struct ipt_entry);
+ i < e->target_offset;
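The core of this patch is ip_checkdefault(): a rule with no inverts, no fragment flag, empty interface masks, zero address masks and no protocol can never fail to match, so it is flagged and ip_packet_match() returns early. A simplified userspace rendition, using an illustrative subset of struct ipt_ip, is sketched below.

/* Simplified rendition of the fast-path test added by the patch. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define IFNAMSIZ 16
#define IPT_F_FRAG          0x01
#define IPT_F_NO_DEF_MATCH  0x80

struct ipt_ip_lite {                  /* illustrative subset of struct ipt_ip */
    uint32_t smsk, dmsk;
    char     iniface_mask[IFNAMSIZ];
    char     outiface_mask[IFNAMSIZ];
    uint16_t proto;
    uint8_t  flags, invflags;
};

static void ip_checkdefault(struct ipt_ip_lite *ip)
{
    static const char iface_mask[IFNAMSIZ] = {};

    if (ip->invflags || (ip->flags & IPT_F_FRAG))
        return;
    if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) ||
        memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ))
        return;
    if (ip->smsk || ip->dmsk || ip->proto)
        return;
    ip->flags |= IPT_F_NO_DEF_MATCH;  /* matches every packet */
}

int main(void)
{
    struct ipt_ip_lite any = {0};     /* "-A FORWARD -j DROP" style rule */

    ip_checkdefault(&any);
    printf("default-match fast path: %s\n",
           (any.flags & IPT_F_NO_DEF_MATCH) ? "yes" : "no");
    return 0;
}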
diff --git a/target/linux/generic/patches-3.3/611-netfilter_match_bypass_default_table.patch b/target/linux/generic/patches-3.3/611-netfilter_match_bypass_default_table.patch
new file mode 100644
index 0000000..3cf0e5a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/611-netfilter_match_bypass_default_table.patch
@@ -0,0 +1,81 @@
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -310,6 +310,33 @@ struct ipt_entry *ipt_next_entry(const s
+ return (void *)entry + entry->next_offset;
+ }
+
++static bool
++ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
++{
++ struct xt_entry_target *t;
++ struct xt_standard_target *st;
++
++ if (e->target_offset != sizeof(struct ipt_entry))
++ return false;
++
++ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
++ return false;
++
++ t = ipt_get_target(e);
++ if (t->u.kernel.target->target)
++ return false;
++
++ st = (struct xt_standard_target *) t;
++ if (st->verdict == XT_RETURN)
++ return false;
++
++ if (st->verdict >= 0)
++ return false;
++
++ *verdict = (unsigned)(-st->verdict) - 1;
++ return true;
++}
++
+ /* Returns one of the generic firewall policies, like NF_ACCEPT. */
+ unsigned int
+ ipt_do_table(struct sk_buff *skb,
+@@ -334,6 +361,25 @@ ipt_do_table(struct sk_buff *skb,
+ ip = ip_hdr(skb);
+ indev = in ? in->name : nulldevname;
+ outdev = out ? out->name : nulldevname;
++
++ IP_NF_ASSERT(table->valid_hooks & (1 << hook));
++ local_bh_disable();
++ addend = xt_write_recseq_begin();
++ private = table->private;
++ cpu = smp_processor_id();
++ table_base = private->entries[cpu];
++ jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
++ stackptr = per_cpu_ptr(private->stackptr, cpu);
++ origptr = *stackptr;
++
++ e = get_entry(table_base, private->hook_entry[hook]);
++ if (ipt_handle_default_rule(e, &verdict)) {
++ ADD_COUNTER(e->counters, skb->len, 1);
++ xt_write_recseq_end(addend);
++ local_bh_enable();
++ return verdict;
++ }
++
+ /* We handle fragments by dealing with the first fragment as
+ * if it was a normal packet. All other fragments are treated
+ * normally, except that they will NEVER match rules that ask
+@@ -348,18 +394,6 @@ ipt_do_table(struct sk_buff *skb,
+ acpar.family = NFPROTO_IPV4;
+ acpar.hooknum = hook;
+
+- IP_NF_ASSERT(table->valid_hooks & (1 << hook));
+- local_bh_disable();
+- addend = xt_write_recseq_begin();
+- private = table->private;
+- cpu = smp_processor_id();
+- table_base = private->entries[cpu];
+- jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
+- stackptr = per_cpu_ptr(private->stackptr, cpu);
+- origptr = *stackptr;
+-
+- e = get_entry(table_base, private->hook_entry[hook]);
+-
+ pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
+ table->name, hook, origptr,
+ get_entry(table_base, private->underflow[hook]));
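ipt_handle_default_rule() above short-circuits a chain whose first rule is a flagged default rule carrying a built-in standard-target verdict; built-in verdicts are stored negated minus one, which is why the code recovers the netfilter verdict as (unsigned)(-verdict) - 1. A standalone sketch of that encoding follows, with the NF_* values reproduced for illustration.

/* Sketch of the standard-target verdict encoding the patch relies on. */
#include <stdio.h>

#define NF_DROP   0
#define NF_ACCEPT 1

static unsigned int decode_verdict(int stored)
{
    return (unsigned int)(-stored) - 1;
}

int main(void)
{
    int stored_accept = -NF_ACCEPT - 1;   /* how an ACCEPT rule stores it */
    int stored_drop   = -NF_DROP - 1;

    printf("ACCEPT rule -> verdict %u\n", decode_verdict(stored_accept));
    printf("DROP   rule -> verdict %u\n", decode_verdict(stored_drop));
    return 0;
}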
diff --git a/target/linux/generic/patches-3.3/612-netfilter_match_reduce_memory_access.patch b/target/linux/generic/patches-3.3/612-netfilter_match_reduce_memory_access.patch
new file mode 100644
index 0000000..f506165
--- /dev/null
+++ b/target/linux/generic/patches-3.3/612-netfilter_match_reduce_memory_access.patch
@@ -0,0 +1,16 @@
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -84,9 +84,11 @@ ip_packet_match(const struct iphdr *ip,
+ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+ return true;
+
+- if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
++ if (FWINV(ipinfo->smsk.s_addr &&
++ (ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
+ IPT_INV_SRCIP) ||
+- FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
++ FWINV(ipinfo->dmsk.s_addr &&
++ (ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
+ IPT_INV_DSTIP)) {
+ dprintf("Source or dest mismatch.\n");
+
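The two-line change above adds a short-circuit: when the rule's netmask is 0.0.0.0, the masked-address comparison can never exclude a packet, so it is skipped entirely. A minimal standalone sketch of that test:

/* mask == 0 means "no address restriction", so the comparison is skipped. */
#include <stdio.h>
#include <stdint.h>

static int addr_mismatch(uint32_t addr, uint32_t mask, uint32_t want)
{
    return mask && (addr & mask) != want;
}

int main(void)
{
    uint32_t host = 0xc0a80001;              /* 192.168.0.1, host byte order */

    printf("mask 0        -> mismatch=%d\n", addr_mismatch(host, 0, 0));
    printf("mask /24 hit  -> mismatch=%d\n",
           addr_mismatch(host, 0xffffff00, 0xc0a80000));
    printf("mask /24 miss -> mismatch=%d\n",
           addr_mismatch(host, 0xffffff00, 0x0a000000));
    return 0;
}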
diff --git a/target/linux/generic/patches-3.3/613-netfilter_optional_tcp_window_check.patch b/target/linux/generic/patches-3.3/613-netfilter_optional_tcp_window_check.patch
new file mode 100644
index 0000000..1c259d4
--- /dev/null
+++ b/target/linux/generic/patches-3.3/613-netfilter_optional_tcp_window_check.patch
@@ -0,0 +1,36 @@
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -29,6 +29,9 @@
+ #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+ #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+
++/* Do not check the TCP window for incoming packets */
++static int nf_ct_tcp_no_window_check __read_mostly = 1;
++
+ /* "Be conservative in what you do,
+ be liberal in what you accept from others."
+ If it's non-zero, we mark only out of window RST segments as INVALID. */
+@@ -524,6 +527,9 @@ static bool tcp_in_window(const struct n
+ s16 receiver_offset;
+ bool res;
+
++ if (nf_ct_tcp_no_window_check)
++ return true;
++
+ /*
+ * Get the required data from the packet.
+ */
+@@ -1321,6 +1327,13 @@ static struct ctl_table tcp_sysctl_table
+ .proc_handler = proc_dointvec,
+ },
+ {
++ .procname = "nf_conntrack_tcp_no_window_check",
++ .data = &nf_ct_tcp_no_window_check,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
+ .procname = "nf_conntrack_tcp_be_liberal",
+ .data = &nf_ct_tcp_be_liberal,
+ .maxlen = sizeof(unsigned int),
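The new nf_ct_tcp_no_window_check flag defaults to 1 and is exported through the sysctl table above, so it can be toggled at run time. A hedged sketch of flipping it from userspace follows; the proc path assumes the patched kernel with conntrack loaded, and writing it requires root.

/* Illustration only: re-enable normal TCP window checking by writing 0 to
 * the sysctl the patch registers.  Path is an assumption based on where the
 * nf_conntrack sysctls are exposed. */
#include <stdio.h>

int main(void)
{
    const char *path =
        "/proc/sys/net/netfilter/nf_conntrack_tcp_no_window_check";
    FILE *f = fopen(path, "w");

    if (!f) {
        perror(path);
        return 1;
    }
    fputs("0\n", f);      /* 0 = check windows, 1 = skip the check (default) */
    fclose(f);
    return 0;
}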
diff --git a/target/linux/generic/patches-3.3/620-sched_esfq.patch b/target/linux/generic/patches-3.3/620-sched_esfq.patch
new file mode 100644
index 0000000..1fdf09d
--- /dev/null
+++ b/target/linux/generic/patches-3.3/620-sched_esfq.patch
@@ -0,0 +1,791 @@
+--- a/include/linux/pkt_sched.h
++++ b/include/linux/pkt_sched.h
+@@ -193,6 +193,33 @@ struct tc_sfq_xstats {
+ __s32 allot;
+ };
+
++/* ESFQ section */
++
++enum
++{
++ /* traditional */
++ TCA_SFQ_HASH_CLASSIC,
++ TCA_SFQ_HASH_DST,
++ TCA_SFQ_HASH_SRC,
++ TCA_SFQ_HASH_FWMARK,
++ /* conntrack */
++ TCA_SFQ_HASH_CTORIGDST,
++ TCA_SFQ_HASH_CTORIGSRC,
++ TCA_SFQ_HASH_CTREPLDST,
++ TCA_SFQ_HASH_CTREPLSRC,
++ TCA_SFQ_HASH_CTNATCHG,
++};
++
++struct tc_esfq_qopt
++{
++ unsigned quantum; /* Bytes per round allocated to flow */
++ int perturb_period; /* Period of hash perturbation */
++ __u32 limit; /* Maximal packets in queue */
++ unsigned divisor; /* Hash divisor */
++ unsigned flows; /* Maximal number of flows */
++ unsigned hash_kind; /* Hash function to use for flow identification */
++};
++
+ /* RED section */
+
+ enum {
+--- a/net/sched/Kconfig
++++ b/net/sched/Kconfig
+@@ -148,6 +148,37 @@ config NET_SCH_SFQ
+ To compile this code as a module, choose M here: the
+ module will be called sch_sfq.
+
++config NET_SCH_ESFQ
++ tristate "Enhanced Stochastic Fairness Queueing (ESFQ)"
++ ---help---
++ Say Y here if you want to use the Enhanced Stochastic Fairness
++ Queueing (ESFQ) packet scheduling algorithm for some of your network
++ devices or as a leaf discipline for a classful qdisc such as HTB or
++ CBQ (see the top of <file:net/sched/sch_esfq.c> for details and
++ references to the SFQ algorithm).
++
++ This is an enhanced SFQ version which allows you to control some
++ hardcoded values in the SFQ scheduler.
++
++ ESFQ also adds control of the hash function used to identify packet
++ flows. The original SFQ discipline hashes by connection; ESFQ adds
++ several other hashing methods, such as by src IP or by dst IP, which
++ can be more fair to users in some networking situations.
++
++ To compile this code as a module, choose M here: the
++ module will be called sch_esfq.
++
++config NET_SCH_ESFQ_NFCT
++ bool "Connection Tracking Hash Types"
++ depends on NET_SCH_ESFQ && NF_CONNTRACK
++ ---help---
++ Say Y here to enable support for hashing based on netfilter connection
++ tracking information. This is useful for a router that is also using
++ NAT to connect privately-addressed hosts to the Internet. If you want
++ to provide fair distribution of upstream bandwidth, ESFQ must use
++ connection tracking information, since all outgoing packets will share
++ the same source address.
++
+ config NET_SCH_TEQL
+ tristate "True Link Equalizer (TEQL)"
+ ---help---
+--- a/net/sched/Makefile
++++ b/net/sched/Makefile
+@@ -26,6 +26,7 @@ obj-$(CONFIG_NET_SCH_INGRESS) += sch_ing
+ obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
+ obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o
+ obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
++obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o
+ obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
+ obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
+ obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
+--- /dev/null
++++ b/net/sched/sch_esfq.c
+@@ -0,0 +1,702 @@
++/*
++ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
++ *
++ * Changes: Alexander Atanasov, <alex@ssi.bg>
++ * Added dynamic depth,limit,divisor,hash_kind options.
++ * Added dst and src hashes.
++ *
++ * Alexander Clouter, <alex@digriz.org.uk>
++ * Ported ESFQ to Linux 2.6.
++ *
++ * Corey Hickey, <bugfood-c@fatooh.org>
++ * Maintenance of the Linux 2.6 port.
++ * Added fwmark hash (thanks to Robert Kurjata).
++ * Added usage of jhash.
++ * Added conntrack support.
++ * Added ctnatchg hash (thanks to Ben Pfountz).
++ */
++
++#include <linux/module.h>
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <linux/bitops.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/jiffies.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/socket.h>
++#include <linux/sockios.h>
++#include <linux/in.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/if_ether.h>
++#include <linux/inet.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/notifier.h>
++#include <linux/init.h>
++#include <net/ip.h>
++#include <net/netlink.h>
++#include <linux/ipv6.h>
++#include <net/route.h>
++#include <linux/skbuff.h>
++#include <net/sock.h>
++#include <net/pkt_sched.h>
++#include <linux/jhash.h>
++#ifdef CONFIG_NET_SCH_ESFQ_NFCT
++#include <net/netfilter/nf_conntrack.h>
++#endif
++
++/* Stochastic Fairness Queuing algorithm.
++ For more comments look at sch_sfq.c.
++ The difference is that you can change limit, depth,
++ hash table size and choose alternate hash types.
++
++ classic: same as in sch_sfq.c
++ dst: destination IP address
++ src: source IP address
++ fwmark: netfilter mark value
++ ctorigdst: original destination IP address
++ ctorigsrc: original source IP address
++ ctrepldst: reply destination IP address
++ ctreplsrc: reply source IP
++
++*/
++
++#define ESFQ_HEAD 0
++#define ESFQ_TAIL 1
++
++/* This type should contain at least SFQ_DEPTH*2 values */
++typedef unsigned int esfq_index;
++
++struct esfq_head
++{
++ esfq_index next;
++ esfq_index prev;
++};
++
++struct esfq_sched_data
++{
++/* Parameters */
++ int perturb_period;
++ unsigned quantum; /* Allotment per round: MUST BE >= MTU */
++ int limit;
++ unsigned depth;
++ unsigned hash_divisor;
++ unsigned hash_kind;
++/* Variables */
++ struct timer_list perturb_timer;
++ int perturbation;
++ esfq_index tail; /* Index of current slot in round */
++ esfq_index max_depth; /* Maximal depth */
++
++ esfq_index *ht; /* Hash table */
++ esfq_index *next; /* Active slots link */
++ short *allot; /* Current allotment per slot */
++ unsigned short *hash; /* Hash value indexed by slots */
++ struct sk_buff_head *qs; /* Slot queue */
++ struct esfq_head *dep; /* Linked list of slots, indexed by depth */
++};
++
++/* This contains the info we will hash. */
++struct esfq_packet_info
++{
++ u32 proto; /* protocol or port */
++ u32 src; /* source from packet header */
++ u32 dst; /* destination from packet header */
++ u32 ctorigsrc; /* original source from conntrack */
++ u32 ctorigdst; /* original destination from conntrack */
++ u32 ctreplsrc; /* reply source from conntrack */
++ u32 ctrepldst; /* reply destination from conntrack */
++ u32 mark; /* netfilter mark (fwmark) */
++};
++
++static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a)
++{
++ return jhash_1word(a, q->perturbation) & (q->hash_divisor-1);
++}
++
++static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b)
++{
++ return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1);
++}
++
++static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c)
++{
++ return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1);
++}
++
++static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
++{
++ struct esfq_packet_info info;
++#ifdef CONFIG_NET_SCH_ESFQ_NFCT
++ enum ip_conntrack_info ctinfo;
++ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
++#endif
++
++ switch (skb->protocol) {
++ case __constant_htons(ETH_P_IP):
++ {
++ struct iphdr *iph = ip_hdr(skb);
++ info.dst = iph->daddr;
++ info.src = iph->saddr;
++ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
++ (iph->protocol == IPPROTO_TCP ||
++ iph->protocol == IPPROTO_UDP ||
++ iph->protocol == IPPROTO_SCTP ||
++ iph->protocol == IPPROTO_DCCP ||
++ iph->protocol == IPPROTO_ESP))
++ info.proto = *(((u32*)iph) + iph->ihl);
++ else
++ info.proto = iph->protocol;
++ break;
++ }
++ case __constant_htons(ETH_P_IPV6):
++ {
++ struct ipv6hdr *iph = ipv6_hdr(skb);
++ /* Hash ipv6 addresses into a u32. This isn't ideal,
++ * but the code is simple. */
++ info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
++ info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
++ if (iph->nexthdr == IPPROTO_TCP ||
++ iph->nexthdr == IPPROTO_UDP ||
++ iph->nexthdr == IPPROTO_SCTP ||
++ iph->nexthdr == IPPROTO_DCCP ||
++ iph->nexthdr == IPPROTO_ESP)
++ info.proto = *(u32*)&iph[1];
++ else
++ info.proto = iph->nexthdr;
++ break;
++ }
++ default:
++ info.dst = (u32)(unsigned long)skb_dst(skb);
++ info.src = (u32)(unsigned long)skb->sk;
++ info.proto = skb->protocol;
++ }
++
++ info.mark = skb->mark;
++
++#ifdef CONFIG_NET_SCH_ESFQ_NFCT
++ /* defaults if there is no conntrack info */
++ info.ctorigsrc = info.src;
++ info.ctorigdst = info.dst;
++ info.ctreplsrc = info.dst;
++ info.ctrepldst = info.src;
++ /* collect conntrack info */
++ if (ct && ct != &nf_conntrack_untracked) {
++ if (skb->protocol == __constant_htons(ETH_P_IP)) {
++ info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
++ info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
++ info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
++ info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
++ }
++ else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
++ /* Again, hash ipv6 addresses into a single u32. */
++ info.ctorigsrc = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, q->perturbation);
++ info.ctorigdst = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, q->perturbation);
++ info.ctreplsrc = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, q->perturbation);
++ info.ctrepldst = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, q->perturbation);
++ }
++
++ }
++#endif
++
++ switch(q->hash_kind) {
++ case TCA_SFQ_HASH_CLASSIC:
++ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
++ case TCA_SFQ_HASH_DST:
++ return esfq_jhash_1word(q, info.dst);
++ case TCA_SFQ_HASH_SRC:
++ return esfq_jhash_1word(q, info.src);
++ case TCA_SFQ_HASH_FWMARK:
++ return esfq_jhash_1word(q, info.mark);
++#ifdef CONFIG_NET_SCH_ESFQ_NFCT
++ case TCA_SFQ_HASH_CTORIGDST:
++ return esfq_jhash_1word(q, info.ctorigdst);
++ case TCA_SFQ_HASH_CTORIGSRC:
++ return esfq_jhash_1word(q, info.ctorigsrc);
++ case TCA_SFQ_HASH_CTREPLDST:
++ return esfq_jhash_1word(q, info.ctrepldst);
++ case TCA_SFQ_HASH_CTREPLSRC:
++ return esfq_jhash_1word(q, info.ctreplsrc);
++ case TCA_SFQ_HASH_CTNATCHG:
++ {
++ if (info.ctorigdst == info.ctreplsrc)
++ return esfq_jhash_1word(q, info.ctorigsrc);
++ return esfq_jhash_1word(q, info.ctreplsrc);
++ }
++#endif
++ default:
++ if (net_ratelimit())
++ printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n");
++ }
++ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
++}
++
++static inline void esfq_link(struct esfq_sched_data *q, esfq_index x)
++{
++ esfq_index p, n;
++ int d = q->qs[x].qlen + q->depth;
++
++ p = d;
++ n = q->dep[d].next;
++ q->dep[x].next = n;
++ q->dep[x].prev = p;
++ q->dep[p].next = q->dep[n].prev = x;
++}
++
++static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x)
++{
++ esfq_index p, n;
++
++ n = q->dep[x].next;
++ p = q->dep[x].prev;
++ q->dep[p].next = n;
++ q->dep[n].prev = p;
++
++ if (n == p && q->max_depth == q->qs[x].qlen + 1)
++ q->max_depth--;
++
++ esfq_link(q, x);
++}
++
++static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x)
++{
++ esfq_index p, n;
++ int d;
++
++ n = q->dep[x].next;
++ p = q->dep[x].prev;
++ q->dep[p].next = n;
++ q->dep[n].prev = p;
++ d = q->qs[x].qlen;
++ if (q->max_depth < d)
++ q->max_depth = d;
++
++ esfq_link(q, x);
++}
++
++static unsigned int esfq_drop(struct Qdisc *sch)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ esfq_index d = q->max_depth;
++ struct sk_buff *skb;
++ unsigned int len;
++
++ /* Queue is full! Find the longest slot and
++ drop a packet from it */
++
++ if (d > 1) {
++ esfq_index x = q->dep[d+q->depth].next;
++ skb = q->qs[x].prev;
++ len = skb->len;
++ __skb_unlink(skb, &q->qs[x]);
++ kfree_skb(skb);
++ esfq_dec(q, x);
++ sch->q.qlen--;
++ sch->qstats.drops++;
++ sch->qstats.backlog -= len;
++ return len;
++ }
++
++ if (d == 1) {
++ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
++ d = q->next[q->tail];
++ q->next[q->tail] = q->next[d];
++ q->allot[q->next[d]] += q->quantum;
++ skb = q->qs[d].prev;
++ len = skb->len;
++ __skb_unlink(skb, &q->qs[d]);
++ kfree_skb(skb);
++ esfq_dec(q, d);
++ sch->q.qlen--;
++ q->ht[q->hash[d]] = q->depth;
++ sch->qstats.drops++;
++ sch->qstats.backlog -= len;
++ return len;
++ }
++
++ return 0;
++}
++
++static void esfq_q_enqueue(struct sk_buff *skb, struct esfq_sched_data *q, unsigned int end)
++{
++ unsigned hash = esfq_hash(q, skb);
++ unsigned depth = q->depth;
++ esfq_index x;
++
++ x = q->ht[hash];
++ if (x == depth) {
++ q->ht[hash] = x = q->dep[depth].next;
++ q->hash[x] = hash;
++ }
++
++ if (end == ESFQ_TAIL)
++ __skb_queue_tail(&q->qs[x], skb);
++ else
++ __skb_queue_head(&q->qs[x], skb);
++
++ esfq_inc(q, x);
++ if (q->qs[x].qlen == 1) { /* The flow is new */
++ if (q->tail == depth) { /* It is the first flow */
++ q->tail = x;
++ q->next[x] = x;
++ q->allot[x] = q->quantum;
++ } else {
++ q->next[x] = q->next[q->tail];
++ q->next[q->tail] = x;
++ q->tail = x;
++ }
++ }
++}
++
++static int esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ esfq_q_enqueue(skb, q, ESFQ_TAIL);
++ sch->qstats.backlog += skb->len;
++ if (++sch->q.qlen < q->limit-1) {
++ sch->bstats.bytes += skb->len;
++ sch->bstats.packets++;
++ return 0;
++ }
++
++ sch->qstats.drops++;
++ esfq_drop(sch);
++ return NET_XMIT_CN;
++}
++
++static struct sk_buff *esfq_peek(struct Qdisc* sch)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ esfq_index a;
++
++ /* No active slots */
++ if (q->tail == q->depth)
++ return NULL;
++
++ a = q->next[q->tail];
++ return skb_peek(&q->qs[a]);
++}
++
++static struct sk_buff *esfq_q_dequeue(struct esfq_sched_data *q)
++{
++ struct sk_buff *skb;
++ unsigned depth = q->depth;
++ esfq_index a, old_a;
++
++ /* No active slots */
++ if (q->tail == depth)
++ return NULL;
++
++ a = old_a = q->next[q->tail];
++
++ /* Grab packet */
++ skb = __skb_dequeue(&q->qs[a]);
++ esfq_dec(q, a);
++
++ /* Is the slot empty? */
++ if (q->qs[a].qlen == 0) {
++ q->ht[q->hash[a]] = depth;
++ a = q->next[a];
++ if (a == old_a) {
++ q->tail = depth;
++ return skb;
++ }
++ q->next[q->tail] = a;
++ q->allot[a] += q->quantum;
++ } else if ((q->allot[a] -= skb->len) <= 0) {
++ q->tail = a;
++ a = q->next[a];
++ q->allot[a] += q->quantum;
++ }
++
++ return skb;
++}
++
++static struct sk_buff *esfq_dequeue(struct Qdisc* sch)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ struct sk_buff *skb;
++
++ skb = esfq_q_dequeue(q);
++ if (skb == NULL)
++ return NULL;
++ sch->q.qlen--;
++ sch->qstats.backlog -= skb->len;
++ return skb;
++}
++
++static void esfq_q_destroy(struct esfq_sched_data *q)
++{
++ del_timer(&q->perturb_timer);
++ if(q->ht)
++ kfree(q->ht);
++ if(q->dep)
++ kfree(q->dep);
++ if(q->next)
++ kfree(q->next);
++ if(q->allot)
++ kfree(q->allot);
++ if(q->hash)
++ kfree(q->hash);
++ if(q->qs)
++ kfree(q->qs);
++}
++
++static void esfq_destroy(struct Qdisc *sch)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ esfq_q_destroy(q);
++}
++
++
++static void esfq_reset(struct Qdisc* sch)
++{
++ struct sk_buff *skb;
++
++ while ((skb = esfq_dequeue(sch)) != NULL)
++ kfree_skb(skb);
++}
++
++static void esfq_perturbation(unsigned long arg)
++{
++ struct Qdisc *sch = (struct Qdisc*)arg;
++ struct esfq_sched_data *q = qdisc_priv(sch);
++
++ q->perturbation = net_random()&0x1F;
++
++ if (q->perturb_period) {
++ q->perturb_timer.expires = jiffies + q->perturb_period;
++ add_timer(&q->perturb_timer);
++ }
++}
++
++static unsigned int esfq_check_hash(unsigned int kind)
++{
++ switch (kind) {
++ case TCA_SFQ_HASH_CTORIGDST:
++ case TCA_SFQ_HASH_CTORIGSRC:
++ case TCA_SFQ_HASH_CTREPLDST:
++ case TCA_SFQ_HASH_CTREPLSRC:
++ case TCA_SFQ_HASH_CTNATCHG:
++#ifndef CONFIG_NET_SCH_ESFQ_NFCT
++ {
++ if (net_ratelimit())
++ printk(KERN_WARNING "ESFQ: Conntrack hash types disabled in kernel config. Falling back to classic.\n");
++ return TCA_SFQ_HASH_CLASSIC;
++ }
++#endif
++ case TCA_SFQ_HASH_CLASSIC:
++ case TCA_SFQ_HASH_DST:
++ case TCA_SFQ_HASH_SRC:
++ case TCA_SFQ_HASH_FWMARK:
++ return kind;
++ default:
++ {
++ if (net_ratelimit())
++ printk(KERN_WARNING "ESFQ: Unknown hash type. Falling back to classic.\n");
++ return TCA_SFQ_HASH_CLASSIC;
++ }
++ }
++}
++
++static int esfq_q_init(struct esfq_sched_data *q, struct nlattr *opt)
++{
++ struct tc_esfq_qopt *ctl = nla_data(opt);
++ esfq_index p = ~0U/2;
++ int i;
++
++ if (opt && opt->nla_len < nla_attr_size(sizeof(*ctl)))
++ return -EINVAL;
++
++ q->perturbation = 0;
++ q->hash_kind = TCA_SFQ_HASH_CLASSIC;
++ q->max_depth = 0;
++ if (opt == NULL) {
++ q->perturb_period = 0;
++ q->hash_divisor = 1024;
++ q->tail = q->limit = q->depth = 128;
++
++ } else {
++ struct tc_esfq_qopt *ctl = nla_data(opt);
++ if (ctl->quantum)
++ q->quantum = ctl->quantum;
++ q->perturb_period = ctl->perturb_period*HZ;
++ q->hash_divisor = ctl->divisor ? : 1024;
++ q->tail = q->limit = q->depth = ctl->flows ? : 128;
++
++ if ( q->depth > p - 1 )
++ return -EINVAL;
++
++ if (ctl->limit)
++ q->limit = min_t(u32, ctl->limit, q->depth);
++
++ if (ctl->hash_kind) {
++ q->hash_kind = esfq_check_hash(ctl->hash_kind);
++ }
++ }
++
++ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
++ if (!q->ht)
++ goto err_case;
++ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
++ if (!q->dep)
++ goto err_case;
++ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
++ if (!q->next)
++ goto err_case;
++ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
++ if (!q->allot)
++ goto err_case;
++ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
++ if (!q->hash)
++ goto err_case;
++ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
++ if (!q->qs)
++ goto err_case;
++
++ for (i=0; i< q->hash_divisor; i++)
++ q->ht[i] = q->depth;
++ for (i=0; i<q->depth; i++) {
++ skb_queue_head_init(&q->qs[i]);
++ q->dep[i+q->depth].next = i+q->depth;
++ q->dep[i+q->depth].prev = i+q->depth;
++ }
++
++ for (i=0; i<q->depth; i++)
++ esfq_link(q, i);
++ return 0;
++err_case:
++ esfq_q_destroy(q);
++ return -ENOBUFS;
++}
++
++static int esfq_init(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ int err;
++
++ q->quantum = psched_mtu(qdisc_dev(sch)); /* default */
++ if ((err = esfq_q_init(q, opt)))
++ return err;
++
++ init_timer(&q->perturb_timer);
++ q->perturb_timer.data = (unsigned long)sch;
++ q->perturb_timer.function = esfq_perturbation;
++ if (q->perturb_period) {
++ q->perturb_timer.expires = jiffies + q->perturb_period;
++ add_timer(&q->perturb_timer);
++ }
++
++ return 0;
++}
++
++static int esfq_change(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ struct esfq_sched_data new;
++ struct sk_buff *skb;
++ int err;
++
++ /* set up new queue */
++ memset(&new, 0, sizeof(struct esfq_sched_data));
++ new.quantum = psched_mtu(qdisc_dev(sch)); /* default */
++ if ((err = esfq_q_init(&new, opt)))
++ return err;
++
++ /* copy all packets from the old queue to the new queue */
++ sch_tree_lock(sch);
++ while ((skb = esfq_q_dequeue(q)) != NULL)
++ esfq_q_enqueue(skb, &new, ESFQ_TAIL);
++
++ /* clean up the old queue */
++ esfq_q_destroy(q);
++
++ /* copy elements of the new queue into the old queue */
++ q->perturb_period = new.perturb_period;
++ q->quantum = new.quantum;
++ q->limit = new.limit;
++ q->depth = new.depth;
++ q->hash_divisor = new.hash_divisor;
++ q->hash_kind = new.hash_kind;
++ q->tail = new.tail;
++ q->max_depth = new.max_depth;
++ q->ht = new.ht;
++ q->dep = new.dep;
++ q->next = new.next;
++ q->allot = new.allot;
++ q->hash = new.hash;
++ q->qs = new.qs;
++
++ /* finish up */
++ if (q->perturb_period) {
++ q->perturb_timer.expires = jiffies + q->perturb_period;
++ add_timer(&q->perturb_timer);
++ } else {
++ q->perturbation = 0;
++ }
++ sch_tree_unlock(sch);
++ return 0;
++}
++
++static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ unsigned char *b = skb_tail_pointer(skb);
++ struct tc_esfq_qopt opt;
++
++ opt.quantum = q->quantum;
++ opt.perturb_period = q->perturb_period/HZ;
++
++ opt.limit = q->limit;
++ opt.divisor = q->hash_divisor;
++ opt.flows = q->depth;
++ opt.hash_kind = q->hash_kind;
++
++ NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
++
++ return skb->len;
++
++nla_put_failure:
++ nlmsg_trim(skb, b);
++ return -1;
++}
++
++static struct Qdisc_ops esfq_qdisc_ops =
++{
++ .next = NULL,
++ .cl_ops = NULL,
++ .id = "esfq",
++ .priv_size = sizeof(struct esfq_sched_data),
++ .enqueue = esfq_enqueue,
++ .dequeue = esfq_dequeue,
++ .peek = esfq_peek,
++ .drop = esfq_drop,
++ .init = esfq_init,
++ .reset = esfq_reset,
++ .destroy = esfq_destroy,
++ .change = esfq_change,
++ .dump = esfq_dump,
++ .owner = THIS_MODULE,
++};
++
++static int __init esfq_module_init(void)
++{
++ return register_qdisc(&esfq_qdisc_ops);
++}
++static void __exit esfq_module_exit(void)
++{
++ unregister_qdisc(&esfq_qdisc_ops);
++}
++module_init(esfq_module_init)
++module_exit(esfq_module_exit)
++MODULE_LICENSE("GPL");
diff --git a/target/linux/generic/patches-3.3/621-sched_act_connmark.patch b/target/linux/generic/patches-3.3/621-sched_act_connmark.patch
new file mode 100644
index 0000000..157421d
--- /dev/null
+++ b/target/linux/generic/patches-3.3/621-sched_act_connmark.patch
@@ -0,0 +1,172 @@
+--- /dev/null
++++ b/net/sched/act_connmark.c
+@@ -0,0 +1,137 @@
++/*
++ * Copyright (c) 2011 Felix Fietkau <nbd@openwrt.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
++ * Place - Suite 330, Boston, MA 02111-1307 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/skbuff.h>
++#include <linux/rtnetlink.h>
++#include <linux/pkt_cls.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
++#include <net/netlink.h>
++#include <net/pkt_sched.h>
++#include <net/act_api.h>
++
++#include <net/netfilter/nf_conntrack.h>
++#include <net/netfilter/nf_conntrack_core.h>
++
++#define TCA_ACT_CONNMARK 20
++
++#define CONNMARK_TAB_MASK 3
++static struct tcf_common *tcf_connmark_ht[CONNMARK_TAB_MASK + 1];
++static u32 connmark_idx_gen;
++static DEFINE_RWLOCK(connmark_lock);
++
++static struct tcf_hashinfo connmark_hash_info = {
++ .htab = tcf_connmark_ht,
++ .hmask = CONNMARK_TAB_MASK,
++ .lock = &connmark_lock,
++};
++
++static int tcf_connmark(struct sk_buff *skb, struct tc_action *a,
++ struct tcf_result *res)
++{
++ struct nf_conn *c;
++ enum ip_conntrack_info ctinfo;
++ int proto;
++ int r;
++
++ if (skb->protocol == htons(ETH_P_IP)) {
++ if (skb->len < sizeof(struct iphdr))
++ goto out;
++ proto = PF_INET;
++ } else if (skb->protocol == htons(ETH_P_IPV6)) {
++ if (skb->len < sizeof(struct ipv6hdr))
++ goto out;
++ proto = PF_INET6;
++ } else
++ goto out;
++
++ r = nf_conntrack_in(dev_net(skb->dev), proto, NF_INET_PRE_ROUTING, skb);
++ if (r != NF_ACCEPT)
++ goto out;
++
++ c = nf_ct_get(skb, &ctinfo);
++ if (!c)
++ goto out;
++
++ skb->mark = c->mark;
++ nf_conntrack_put(skb->nfct);
++ skb->nfct = NULL;
++
++out:
++ return TC_ACT_PIPE;
++}
++
++static int tcf_connmark_init(struct nlattr *nla, struct nlattr *est,
++ struct tc_action *a, int ovr, int bind)
++{
++ struct tcf_common *pc;
++
++ pc = tcf_hash_create(0, est, a, sizeof(*pc), bind,
++ &connmark_idx_gen, &connmark_hash_info);
++ if (IS_ERR(pc))
++ return PTR_ERR(pc);
++
++ tcf_hash_insert(pc, &connmark_hash_info);
++
++ return ACT_P_CREATED;
++}
++
++static inline int tcf_connmark_cleanup(struct tc_action *a, int bind)
++{
++ if (a->priv)
++ return tcf_hash_release(a->priv, bind, &connmark_hash_info);
++ return 0;
++}
++
++static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
++ int bind, int ref)
++{
++ return skb->len;
++}
++
++static struct tc_action_ops act_connmark_ops = {
++ .kind = "connmark",
++ .hinfo = &connmark_hash_info,
++ .type = TCA_ACT_CONNMARK,
++ .capab = TCA_CAP_NONE,
++ .owner = THIS_MODULE,
++ .act = tcf_connmark,
++ .dump = tcf_connmark_dump,
++ .cleanup = tcf_connmark_cleanup,
++ .init = tcf_connmark_init,
++ .walk = tcf_generic_walker,
++};
++
++MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
++MODULE_DESCRIPTION("Connection tracking mark restoring");
++MODULE_LICENSE("GPL");
++
++static int __init connmark_init_module(void)
++{
++ return tcf_register_action(&act_connmark_ops);
++}
++
++static void __exit connmark_cleanup_module(void)
++{
++ tcf_unregister_action(&act_connmark_ops);
++}
++
++module_init(connmark_init_module);
++module_exit(connmark_cleanup_module);
+--- a/net/sched/Kconfig
++++ b/net/sched/Kconfig
+@@ -602,6 +602,19 @@ config NET_ACT_CSUM
+ To compile this code as a module, choose M here: the
+ module will be called act_csum.
+
++config NET_ACT_CONNMARK
++ tristate "Connection Tracking Marking"
++ depends on NET_CLS_ACT
++ depends on NF_CONNTRACK
++ depends on NF_CONNTRACK_MARK
++ ---help---
++	  Say Y here to restore the connmark from a scheduler action.
++
++ If unsure, say N.
++
++ To compile this code as a module, choose M here: the
++ module will be called act_connmark.
++
+ config NET_CLS_IND
+ bool "Incoming device classification"
+ depends on NET_CLS_U32 || NET_CLS_FW
+--- a/net/sched/Makefile
++++ b/net/sched/Makefile
+@@ -16,6 +16,7 @@ obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit
+ obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
+ obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
+ obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
++obj-$(CONFIG_NET_ACT_CONNMARK) += act_connmark.o
+ obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
+ obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
+ obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
diff --git a/target/linux/generic/patches-3.3/630-packet_socket_type.patch b/target/linux/generic/patches-3.3/630-packet_socket_type.patch
new file mode 100644
index 0000000..231b745
--- /dev/null
+++ b/target/linux/generic/patches-3.3/630-packet_socket_type.patch
@@ -0,0 +1,132 @@
+This patch allows the user to specify desired packet types (outgoing,
+broadcast, unicast, etc.) on packet sockets via setsockopt.
+This can reduce the load in situations where only a limited number
+of packet types are necessary.
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
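
[Editor's aside: a minimal userspace sketch of how the new option could be exercised; it is an illustration, not part of the patch. PACKET_RECV_TYPE (19) and the bitmask semantics are taken from the hunks below, where a frame is only queued if the bit (1 << skb->pkt_type) is set in the socket's mask; the fallback #define is only needed when building against unpatched headers.]

#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifndef PACKET_RECV_TYPE
#define PACKET_RECV_TYPE 19     /* value used by this patch */
#endif

int main(void)
{
        /* Raw packet socket that sees every protocol (requires CAP_NET_RAW). */
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        /* Only queue locally generated (outgoing) frames. */
        unsigned int mask = 1 << PACKET_OUTGOING;

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        if (setsockopt(fd, SOL_PACKET, PACKET_RECV_TYPE, &mask, sizeof(mask)) < 0)
                perror("setsockopt(PACKET_RECV_TYPE)");
        /* ... recv() loop would go here ... */
        return 0;
}
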
+
+--- a/include/linux/if_packet.h
++++ b/include/linux/if_packet.h
+@@ -29,6 +29,8 @@ struct sockaddr_ll {
+ /* These ones are invisible by user level */
+ #define PACKET_LOOPBACK 5 /* MC/BRD frame looped back */
+ #define PACKET_FASTROUTE 6 /* Fastrouted frame */
++#define PACKET_MASK_ANY 0xffffffff /* mask for packet type bits */
++
+
+ /* Packet socket options */
+
+@@ -50,6 +52,7 @@ struct sockaddr_ll {
+ #define PACKET_TX_TIMESTAMP 16
+ #define PACKET_TIMESTAMP 17
+ #define PACKET_FANOUT 18
++#define PACKET_RECV_TYPE 19
+
+ #define PACKET_FANOUT_HASH 0
+ #define PACKET_FANOUT_LB 1
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -296,6 +296,7 @@ struct packet_sock {
+ unsigned int tp_loss:1;
+ unsigned int tp_tstamp;
+ struct packet_type prot_hook ____cacheline_aligned_in_smp;
++ unsigned int pkt_type;
+ };
+
+ #define PACKET_FANOUT_MAX 256
+@@ -1383,6 +1384,7 @@ static int packet_rcv_spkt(struct sk_buf
+ {
+ struct sock *sk;
+ struct sockaddr_pkt *spkt;
++ struct packet_sock *po;
+
+ /*
+ * When we registered the protocol we saved the socket in the data
+@@ -1390,6 +1392,7 @@ static int packet_rcv_spkt(struct sk_buf
+ */
+
+ sk = pt->af_packet_priv;
++ po = pkt_sk(sk);
+
+ /*
+ * Yank back the headers [hope the device set this
+@@ -1402,7 +1405,7 @@ static int packet_rcv_spkt(struct sk_buf
+ * so that this procedure is noop.
+ */
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
+ goto out;
+
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+@@ -1596,12 +1599,12 @@ static int packet_rcv(struct sk_buff *sk
+ int skb_len = skb->len;
+ unsigned int snaplen, res;
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
+- goto drop;
+-
+ sk = pt->af_packet_priv;
+ po = pkt_sk(sk);
+
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
++ goto drop;
++
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+ goto drop;
+
+@@ -1720,12 +1723,12 @@ static int tpacket_rcv(struct sk_buff *s
+ struct timespec ts;
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
+- goto drop;
+-
+ sk = pt->af_packet_priv;
+ po = pkt_sk(sk);
+
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
++ goto drop;
++
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+ goto drop;
+
+@@ -2595,6 +2598,7 @@ static int packet_create(struct net *net
+ spin_lock_init(&po->bind_lock);
+ mutex_init(&po->pg_vec_lock);
+ po->prot_hook.func = packet_rcv;
++ po->pkt_type = PACKET_MASK_ANY & ~(1 << PACKET_LOOPBACK);
+
+ if (sock->type == SOCK_PACKET)
+ po->prot_hook.func = packet_rcv_spkt;
+@@ -3192,6 +3196,16 @@ packet_setsockopt(struct socket *sock, i
+
+ return fanout_add(sk, val & 0xffff, val >> 16);
+ }
++ case PACKET_RECV_TYPE:
++ {
++ unsigned int val;
++ if (optlen != sizeof(val))
++ return -EINVAL;
++ if (copy_from_user(&val, optval, sizeof(val)))
++ return -EFAULT;
++ po->pkt_type = val & ~PACKET_LOOPBACK;
++ return 0;
++ }
+ default:
+ return -ENOPROTOOPT;
+ }
+@@ -3262,6 +3276,13 @@ static int packet_getsockopt(struct sock
+
+ data = &val;
+ break;
++ case PACKET_RECV_TYPE:
++ if (len > sizeof(unsigned int))
++ len = sizeof(unsigned int);
++ val = po->pkt_type;
++
++ data = &val;
++ break;
+ case PACKET_VERSION:
+ if (len > sizeof(int))
+ len = sizeof(int);
diff --git a/target/linux/generic/patches-3.3/640-bridge_no_eap_forward.patch b/target/linux/generic/patches-3.3/640-bridge_no_eap_forward.patch
new file mode 100644
index 0000000..bbdb3bf
--- /dev/null
+++ b/target/linux/generic/patches-3.3/640-bridge_no_eap_forward.patch
@@ -0,0 +1,15 @@
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -78,7 +78,11 @@ int br_handle_frame_finish(struct sk_buf
+
+ dst = NULL;
+
+- if (is_broadcast_ether_addr(dest))
++ if (skb->protocol == htons(ETH_P_PAE)) {
++ skb2 = skb;
++ /* Do not forward 802.1x/EAP frames */
++ skb = NULL;
++ } else if (is_broadcast_ether_addr(dest))
+ skb2 = skb;
+ else if (is_multicast_ether_addr(dest)) {
+ mdst = br_mdb_get(br, skb);
diff --git a/target/linux/generic/patches-3.3/641-bridge_always_accept_eap.patch b/target/linux/generic/patches-3.3/641-bridge_always_accept_eap.patch
new file mode 100644
index 0000000..e04199b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/641-bridge_always_accept_eap.patch
@@ -0,0 +1,11 @@
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -65,7 +65,7 @@ int br_handle_frame_finish(struct sk_buf
+ br_multicast_rcv(br, p, skb))
+ goto drop;
+
+- if (p->state == BR_STATE_LEARNING)
++ if ((p->state == BR_STATE_LEARNING) && skb->protocol != htons(ETH_P_PAE))
+ goto drop;
+
+ BR_INPUT_SKB_CB(skb)->brdev = br->dev;
diff --git a/target/linux/generic/patches-3.3/642-bridge_port_isolate.patch b/target/linux/generic/patches-3.3/642-bridge_port_isolate.patch
new file mode 100644
index 0000000..7c467fd
--- /dev/null
+++ b/target/linux/generic/patches-3.3/642-bridge_port_isolate.patch
@@ -0,0 +1,103 @@
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -139,6 +139,7 @@ struct net_bridge_port
+
+ unsigned long flags;
+ #define BR_HAIRPIN_MODE 0x00000001
++#define BR_ISOLATE_MODE 0x00000002
+
+ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ u32 multicast_startup_queries_sent;
+--- a/net/bridge/br_sysfs_if.c
++++ b/net/bridge/br_sysfs_if.c
+@@ -149,6 +149,22 @@ static int store_hairpin_mode(struct net
+ static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR,
+ show_hairpin_mode, store_hairpin_mode);
+
++static ssize_t show_isolate_mode(struct net_bridge_port *p, char *buf)
++{
++ int isolate_mode = (p->flags & BR_ISOLATE_MODE) ? 1 : 0;
++ return sprintf(buf, "%d\n", isolate_mode);
++}
++static ssize_t store_isolate_mode(struct net_bridge_port *p, unsigned long v)
++{
++ if (v)
++ p->flags |= BR_ISOLATE_MODE;
++ else
++ p->flags &= ~BR_ISOLATE_MODE;
++ return 0;
++}
++static BRPORT_ATTR(isolate_mode, S_IRUGO | S_IWUSR,
++ show_isolate_mode, store_isolate_mode);
++
+ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
+ {
+@@ -181,6 +197,7 @@ static struct brport_attribute *brport_a
+ &brport_attr_hold_timer,
+ &brport_attr_flush,
+ &brport_attr_hairpin_mode,
++ &brport_attr_isolate_mode,
+ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ &brport_attr_multicast_router,
+ #endif
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -98,7 +98,8 @@ int br_handle_frame_finish(struct sk_buf
+ skb2 = skb;
+
+ br->dev->stats.multicast++;
+- } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
++ } else if ((p->flags & BR_ISOLATE_MODE) ||
++ ((dst = __br_fdb_get(br, dest)) && dst->is_local)) {
+ skb2 = skb;
+ /* Do not forward the packet since it's local. */
+ skb = NULL;
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -109,7 +109,7 @@ void br_deliver(const struct net_bridge_
+ /* called with rcu_read_lock */
+ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
+ {
+- if (should_deliver(to, skb)) {
++ if (should_deliver(to, skb) && !(to->flags & BR_ISOLATE_MODE)) {
+ if (skb0)
+ deliver_clone(to, skb, __br_forward);
+ else
+@@ -164,7 +164,8 @@ out:
+ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
+ struct sk_buff *skb0,
+ void (*__packet_hook)(const struct net_bridge_port *p,
+- struct sk_buff *skb))
++ struct sk_buff *skb),
++ bool forward)
+ {
+ struct net_bridge_port *p;
+ struct net_bridge_port *prev;
+@@ -172,6 +173,9 @@ static void br_flood(struct net_bridge *
+ prev = NULL;
+
+ list_for_each_entry_rcu(p, &br->port_list, list) {
++ if (forward && (p->flags & BR_ISOLATE_MODE))
++ continue;
++
+ prev = maybe_deliver(prev, p, skb, __packet_hook);
+ if (IS_ERR(prev))
+ goto out;
+@@ -195,14 +199,14 @@ out:
+ /* called with rcu_read_lock */
+ void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
+ {
+- br_flood(br, skb, NULL, __br_deliver);
++ br_flood(br, skb, NULL, __br_deliver, false);
+ }
+
+ /* called under bridge lock */
+ void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+ struct sk_buff *skb2)
+ {
+- br_flood(br, skb, skb2, __br_forward);
++ br_flood(br, skb, skb2, __br_forward, true);
+ }
+
+ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
diff --git a/target/linux/generic/patches-3.3/643-bridge_remove_ipv6_dependency.patch b/target/linux/generic/patches-3.3/643-bridge_remove_ipv6_dependency.patch
new file mode 100644
index 0000000..1ca8979
--- /dev/null
+++ b/target/linux/generic/patches-3.3/643-bridge_remove_ipv6_dependency.patch
@@ -0,0 +1,107 @@
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -91,6 +91,12 @@ extern void addrconf_join_solict(struc
+ extern void addrconf_leave_solict(struct inet6_dev *idev,
+ const struct in6_addr *addr);
+
++extern int (*ipv6_dev_get_saddr_hook)(struct net *net,
++ struct net_device *dev,
++ const struct in6_addr *daddr,
++ unsigned int srcprefs,
++ struct in6_addr *saddr);
++
+ static inline unsigned long addrconf_timeout_fixup(u32 timeout,
+ unsigned unit)
+ {
+--- a/net/bridge/Kconfig
++++ b/net/bridge/Kconfig
+@@ -6,7 +6,6 @@ config BRIDGE
+ tristate "802.1d Ethernet Bridging"
+ select LLC
+ select STP
+- depends on IPV6 || IPV6=n
+ ---help---
+ If you say Y here, then your Linux box will be able to act as an
+ Ethernet bridge, which means that the different Ethernet segments it
+--- a/net/ipv6/Makefile
++++ b/net/ipv6/Makefile
+@@ -40,3 +40,4 @@ obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.
+ obj-y += addrconf_core.o exthdrs_core.o
+
+ obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
++obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_stubs.o
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1099,7 +1099,7 @@ out:
+ return ret;
+ }
+
+-int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
++static int __ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
+ const struct in6_addr *daddr, unsigned int prefs,
+ struct in6_addr *saddr)
+ {
+@@ -1224,7 +1224,6 @@ try_nextdev:
+ in6_ifa_put(hiscore->ifa);
+ return 0;
+ }
+-EXPORT_SYMBOL(ipv6_dev_get_saddr);
+
+ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ unsigned char banned_flags)
+@@ -4836,6 +4835,9 @@ int __init addrconf_init(void)
+
+ ipv6_addr_label_rtnl_register();
+
++ BUG_ON(ipv6_dev_get_saddr_hook != NULL);
++ rcu_assign_pointer(ipv6_dev_get_saddr_hook, __ipv6_dev_get_saddr);
++
+ return 0;
+ errout:
+ rtnl_af_unregister(&inet6_ops);
+@@ -4854,6 +4856,9 @@ void addrconf_cleanup(void)
+ struct net_device *dev;
+ int i;
+
++ rcu_assign_pointer(ipv6_dev_get_saddr_hook, NULL);
++ synchronize_rcu();
++
+ unregister_netdevice_notifier(&ipv6_dev_notf);
+ unregister_pernet_subsys(&addrconf_ops);
+ ipv6_addr_label_cleanup();
+--- /dev/null
++++ b/net/ipv6/inet6_stubs.c
+@@ -0,0 +1,33 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++#include <linux/export.h>
++#include <net/ipv6.h>
++
++int (*ipv6_dev_get_saddr_hook)(struct net *net, struct net_device *dev,
++ const struct in6_addr *daddr, unsigned int srcprefs,
++ struct in6_addr *saddr);
++
++EXPORT_SYMBOL(ipv6_dev_get_saddr_hook);
++
++int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
++ const struct in6_addr *daddr, unsigned int prefs,
++ struct in6_addr *saddr)
++{
++ int ret = -EADDRNOTAVAIL;
++ typeof(ipv6_dev_get_saddr_hook) dev_get_saddr;
++
++ rcu_read_lock();
++ dev_get_saddr = rcu_dereference(ipv6_dev_get_saddr_hook);
++
++ if (dev_get_saddr)
++ ret = dev_get_saddr(net, dst_dev, daddr, prefs, saddr);
++
++ rcu_read_unlock();
++ return ret;
++}
++EXPORT_SYMBOL(ipv6_dev_get_saddr);
++
diff --git a/target/linux/generic/patches-3.3/650-pppoe_header_pad.patch b/target/linux/generic/patches-3.3/650-pppoe_header_pad.patch
new file mode 100644
index 0000000..5862dc1
--- /dev/null
+++ b/target/linux/generic/patches-3.3/650-pppoe_header_pad.patch
@@ -0,0 +1,20 @@
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -856,7 +856,7 @@ static int pppoe_sendmsg(struct kiocb *i
+ goto end;
+
+
+- skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
++ skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32 + NET_SKB_PAD,
+ 0, GFP_KERNEL);
+ if (!skb) {
+ error = -ENOMEM;
+@@ -864,7 +864,7 @@ static int pppoe_sendmsg(struct kiocb *i
+ }
+
+ /* Reserve space for headers. */
+- skb_reserve(skb, dev->hard_header_len);
++ skb_reserve(skb, dev->hard_header_len + NET_SKB_PAD);
+ skb_reset_network_header(skb);
+
+ skb->dev = dev;
diff --git a/target/linux/generic/patches-3.3/651-wireless_mesh_header.patch b/target/linux/generic/patches-3.3/651-wireless_mesh_header.patch
new file mode 100644
index 0000000..5c83d19
--- /dev/null
+++ b/target/linux/generic/patches-3.3/651-wireless_mesh_header.patch
@@ -0,0 +1,11 @@
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -145,7 +145,7 @@ static inline bool dev_xmit_complete(int
+ */
+
+ #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
+-# if defined(CONFIG_MAC80211_MESH)
++# if 1 || defined(CONFIG_MAC80211_MESH)
+ # define LL_MAX_HEADER 128
+ # else
+ # define LL_MAX_HEADER 96
diff --git a/target/linux/generic/patches-3.3/652-atm_header_changes.patch b/target/linux/generic/patches-3.3/652-atm_header_changes.patch
new file mode 100644
index 0000000..238d6f8
--- /dev/null
+++ b/target/linux/generic/patches-3.3/652-atm_header_changes.patch
@@ -0,0 +1,12 @@
+--- a/include/linux/atm.h
++++ b/include/linux/atm.h
+@@ -139,6 +139,9 @@ struct atm_trafprm {
+ int min_pcr; /* minimum PCR in cells per second */
+ int max_cdv; /* maximum CDV in microseconds */
+ int max_sdu; /* maximum SDU in bytes */
++ int scr; /* sustained rate in cells per second */
++ int mbs; /* maximum burst size (MBS) in cells */
++ int cdv; /* Cell delay variation */
+ /* extra params for ABR */
+ unsigned int icr; /* Initial Cell Rate (24-bit) */
+ unsigned int tbe; /* Transient Buffer Exposure (24-bit) */
diff --git a/target/linux/generic/patches-3.3/700-swconfig.patch b/target/linux/generic/patches-3.3/700-swconfig.patch
new file mode 100644
index 0000000..48cb643
--- /dev/null
+++ b/target/linux/generic/patches-3.3/700-swconfig.patch
@@ -0,0 +1,29 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -13,6 +13,16 @@ menuconfig PHYLIB
+
+ if PHYLIB
+
++config SWCONFIG
++ tristate "Switch configuration API"
++ ---help---
++ Switch configuration API using netlink. This allows
++ you to configure the VLAN features of certain switches.
++
++config SWCONFIG_LEDS
++ bool "Switch LED trigger support"
++ depends on (SWCONFIG && LEDS_TRIGGERS)
++
+ comment "MII PHY device drivers"
+
+ config MARVELL_PHY
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -3,6 +3,7 @@
+ libphy-objs := phy.o phy_device.o mdio_bus.o
+
+ obj-$(CONFIG_PHYLIB) += libphy.o
++obj-$(CONFIG_SWCONFIG) += swconfig.o
+ obj-$(CONFIG_MARVELL_PHY) += marvell.o
+ obj-$(CONFIG_DAVICOM_PHY) += davicom.o
+ obj-$(CONFIG_CICADA_PHY) += cicada.o
diff --git a/target/linux/generic/patches-3.3/701-phy_extension.patch b/target/linux/generic/patches-3.3/701-phy_extension.patch
new file mode 100644
index 0000000..201c857
--- /dev/null
+++ b/target/linux/generic/patches-3.3/701-phy_extension.patch
@@ -0,0 +1,72 @@
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -299,6 +299,50 @@ int phy_ethtool_gset(struct phy_device *
+ }
+ EXPORT_SYMBOL(phy_ethtool_gset);
+
++int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr)
++{
++ u32 cmd;
++ int tmp;
++ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
++ struct ethtool_value edata = { ETHTOOL_GLINK };
++
++ if (get_user(cmd, (u32 *) useraddr))
++ return -EFAULT;
++
++ switch (cmd) {
++ case ETHTOOL_GSET:
++ phy_ethtool_gset(phydev, &ecmd);
++ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
++ return -EFAULT;
++ return 0;
++
++ case ETHTOOL_SSET:
++ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
++ return -EFAULT;
++ return phy_ethtool_sset(phydev, &ecmd);
++
++ case ETHTOOL_NWAY_RST:
++ /* if autoneg is off, it's an error */
++ tmp = phy_read(phydev, MII_BMCR);
++ if (tmp & BMCR_ANENABLE) {
++ tmp |= (BMCR_ANRESTART);
++ phy_write(phydev, MII_BMCR, tmp);
++ return 0;
++ }
++ return -EINVAL;
++
++ case ETHTOOL_GLINK:
++ edata.data = (phy_read(phydev,
++ MII_BMSR) & BMSR_LSTATUS) ? 1 : 0;
++ if (copy_to_user(useraddr, &edata, sizeof(edata)))
++ return -EFAULT;
++ return 0;
++ }
++
++ return -EOPNOTSUPP;
++}
++EXPORT_SYMBOL(phy_ethtool_ioctl);
++
+ /**
+ * phy_mii_ioctl - generic PHY MII ioctl interface
+ * @phydev: the phy_device struct
+@@ -474,7 +518,7 @@ static void phy_force_reduction(struct p
+ int idx;
+
+ idx = phy_find_setting(phydev->speed, phydev->duplex);
+-
++
+ idx++;
+
+ idx = phy_find_valid(idx, phydev->supported);
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -515,6 +515,7 @@ void phy_start_machine(struct phy_device
+ void phy_stop_machine(struct phy_device *phydev);
+ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
++int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr);
+ int phy_mii_ioctl(struct phy_device *phydev,
+ struct ifreq *ifr, int cmd);
+ int phy_start_interrupts(struct phy_device *phydev);
diff --git a/target/linux/generic/patches-3.3/702-phy_add_aneg_done_function.patch b/target/linux/generic/patches-3.3/702-phy_add_aneg_done_function.patch
new file mode 100644
index 0000000..0649afb
--- /dev/null
+++ b/target/linux/generic/patches-3.3/702-phy_add_aneg_done_function.patch
@@ -0,0 +1,45 @@
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -393,9 +393,18 @@ struct phy_driver {
+ */
+ int (*config_aneg)(struct phy_device *phydev);
+
++ /* Determine if autonegotiation is done */
++ int (*aneg_done)(struct phy_device *phydev);
++
+ /* Determines the negotiated speed and duplex */
+ int (*read_status)(struct phy_device *phydev);
+
++ /*
++ * Update the value in phydev->link to reflect the
++ * current link value
++ */
++ int (*update_link)(struct phy_device *phydev);
++
+ /* Clears any pending interrupts */
+ int (*ack_interrupt)(struct phy_device *phydev);
+
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -705,6 +705,9 @@ int genphy_update_link(struct phy_device
+ {
+ int status;
+
++ if (phydev->drv->update_link)
++ return phydev->drv->update_link(phydev);
++
+ /* Do a fake read */
+ status = phy_read(phydev, MII_BMSR);
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -106,6 +106,9 @@ static inline int phy_aneg_done(struct p
+ {
+ int retval;
+
++ if (phydev->drv->aneg_done)
++ return phydev->drv->aneg_done(phydev);
++
+ retval = phy_read(phydev, MII_BMSR);
+
+ return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
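
[Editor's aside: a hypothetical sketch of how a driver might fill in the two callbacks introduced above; the names and return values are illustrative assumptions, not taken from the patch. A pseudo-PHY that fronts a switch's CPU port can report the link as always up and autonegotiation as already complete.]

#include <linux/phy.h>

static int example_switch_update_link(struct phy_device *phydev)
{
        /* The MAC-facing port of the switch is hard-wired: always up. */
        phydev->link = 1;
        return 0;
}

static int example_switch_aneg_done(struct phy_device *phydev)
{
        /* Nothing to negotiate on a fixed CPU port. */
        return 1;
}

static struct phy_driver example_switch_driver = {
        .name           = "example switch",
        .features       = PHY_BASIC_FEATURES,
        .update_link    = example_switch_update_link,
        .aneg_done      = example_switch_aneg_done,
        /* .config_init, .config_aneg, .read_status, ... as in a normal PHY driver */
};
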
diff --git a/target/linux/generic/patches-3.3/720-phy_adm6996.patch b/target/linux/generic/patches-3.3/720-phy_adm6996.patch
new file mode 100644
index 0000000..d4dbbb5
--- /dev/null
+++ b/target/linux/generic/patches-3.3/720-phy_adm6996.patch
@@ -0,0 +1,26 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -102,6 +102,13 @@ config MICREL_PHY
+ ---help---
+ Supports the KSZ9021, VSC8201, KS8001 PHYs.
+
++config ADM6996_PHY
++ tristate "Driver for ADM6996 switches"
++ select SWCONFIG
++ ---help---
++ Currently supports the ADM6996FC and ADM6996M switches.
++ Support for FC is very limited.
++
+ config FIXED_PHY
+ bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB=y
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -14,6 +14,7 @@ obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+ obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+ obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
++obj-$(CONFIG_ADM6996_PHY) += adm6996.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/target/linux/generic/patches-3.3/721-phy_packets.patch b/target/linux/generic/patches-3.3/721-phy_packets.patch
new file mode 100644
index 0000000..e566371
--- /dev/null
+++ b/target/linux/generic/patches-3.3/721-phy_packets.patch
@@ -0,0 +1,63 @@
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -149,6 +149,18 @@ int phy_scan_fixups(struct phy_device *p
+ }
+ EXPORT_SYMBOL(phy_scan_fixups);
+
++static int generic_receive_skb(struct sk_buff *skb)
++{
++ skb->protocol = eth_type_trans(skb, skb->dev);
++ return netif_receive_skb(skb);
++}
++
++static int generic_rx(struct sk_buff *skb)
++{
++ skb->protocol = eth_type_trans(skb, skb->dev);
++ return netif_rx(skb);
++}
++
+ static struct phy_device* phy_device_create(struct mii_bus *bus,
+ int addr, int phy_id)
+ {
+@@ -180,6 +192,8 @@ static struct phy_device* phy_device_cre
+ dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr);
+
+ dev->state = PHY_DOWN;
++ dev->netif_receive_skb = &generic_receive_skb;
++ dev->netif_rx = &generic_rx;
+
+ mutex_init(&dev->lock);
+ INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -339,6 +339,20 @@ struct phy_device {
+ void (*adjust_link)(struct net_device *dev);
+
+ void (*adjust_state)(struct net_device *dev);
++
++ /*
++ * By default these point to the original functions
++ * with the same name. Adding them to the phy_device
++ * allows the phy driver to override them for packet
++ * mangling if the ethernet driver supports it.
++ * This is required to support some really horrible
++ * switches such as the Marvell 88E6060.
++ */
++ int (*netif_receive_skb)(struct sk_buff *skb);
++ int (*netif_rx)(struct sk_buff *skb);
++
++ /* alignment offset for packets */
++ int pkt_align;
+ };
+ #define to_phy_device(d) container_of(d, struct phy_device, dev)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1134,6 +1134,7 @@ struct net_device {
+ void *ax25_ptr; /* AX.25 specific data */
+ struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
+ assign before registering */
++ void *phy_ptr; /* PHY device specific data */
+
+ /*
+ * Cache lines mostly used on receive path (including eth_type_trans())
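
[Editor's aside: a hypothetical sketch of how an ethernet driver's receive path might honour the netif_receive_skb/netif_rx hooks added to struct phy_device above. The driver function and the use of dev->phydev (assumed to have been assigned by the driver when it attached the PHY) are illustrative assumptions, not part of the patch; the point is that the attached switch driver gets a chance to untag or otherwise mangle the frame before it enters the stack.]

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static void example_eth_rx(struct net_device *dev, struct sk_buff *skb)
{
        struct phy_device *phydev = dev->phydev;

        skb->dev = dev;
        if (phydev && phydev->netif_receive_skb) {
                /* Hook calls eth_type_trans() itself (see generic_receive_skb above). */
                phydev->netif_receive_skb(skb);
        } else {
                skb->protocol = eth_type_trans(skb, dev);
                netif_receive_skb(skb);
        }
}
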
diff --git a/target/linux/generic/patches-3.3/722-phy_mvswitch.patch b/target/linux/generic/patches-3.3/722-phy_mvswitch.patch
new file mode 100644
index 0000000..925cc45
--- /dev/null
+++ b/target/linux/generic/patches-3.3/722-phy_mvswitch.patch
@@ -0,0 +1,22 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -109,6 +109,9 @@ config ADM6996_PHY
+ Currently supports the ADM6996FC and ADM6996M switches.
+ Support for FC is very limited.
+
++config MVSWITCH_PHY
++ tristate "Driver for Marvell 88E6060 switches"
++
+ config FIXED_PHY
+ bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB=y
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -15,6 +15,7 @@ obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+ obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
+ obj-$(CONFIG_ADM6996_PHY) += adm6996.o
++obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/target/linux/generic/patches-3.3/723-phy_ip175c.patch b/target/linux/generic/patches-3.3/723-phy_ip175c.patch
new file mode 100644
index 0000000..0ac339e
--- /dev/null
+++ b/target/linux/generic/patches-3.3/723-phy_ip175c.patch
@@ -0,0 +1,23 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -112,6 +112,10 @@ config ADM6996_PHY
+ config MVSWITCH_PHY
+ tristate "Driver for Marvell 88E6060 switches"
+
++config IP17XX_PHY
++ tristate "Driver for IC+ IP17xx switches"
++ select SWCONFIG
++
+ config FIXED_PHY
+ bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB=y
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -16,6 +16,7 @@ obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
+ obj-$(CONFIG_ADM6996_PHY) += adm6996.o
+ obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o
++obj-$(CONFIG_IP17XX_PHY) += ip17xx.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/target/linux/generic/patches-3.3/724-phy_ar8216.patch b/target/linux/generic/patches-3.3/724-phy_ar8216.patch
new file mode 100644
index 0000000..7a51dcc
--- /dev/null
+++ b/target/linux/generic/patches-3.3/724-phy_ar8216.patch
@@ -0,0 +1,23 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -116,6 +116,10 @@ config IP17XX_PHY
+ tristate "Driver for IC+ IP17xx switches"
+ select SWCONFIG
+
++config AR8216_PHY
++ tristate "Driver for Atheros AR8216 switches"
++ select SWCONFIG
++
+ config FIXED_PHY
+ bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB=y
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -18,6 +18,7 @@ obj-$(CONFIG_ADM6996_PHY) += adm6996.o
+ obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o
+ obj-$(CONFIG_IP17XX_PHY) += ip17xx.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
++obj-$(CONFIG_AR8216_PHY) += ar8216.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
+ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
diff --git a/target/linux/generic/patches-3.3/725-phy_rtl8306.patch b/target/linux/generic/patches-3.3/725-phy_rtl8306.patch
new file mode 100644
index 0000000..42cd274
--- /dev/null
+++ b/target/linux/generic/patches-3.3/725-phy_rtl8306.patch
@@ -0,0 +1,23 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -120,6 +120,10 @@ config AR8216_PHY
+ tristate "Driver for Atheros AR8216 switches"
+ select SWCONFIG
+
++config RTL8306_PHY
++ tristate "Driver for Realtek RTL8306S switches"
++ select SWCONFIG
++
+ config FIXED_PHY
+ bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB=y
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -19,6 +19,7 @@ obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o
+ obj-$(CONFIG_IP17XX_PHY) += ip17xx.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_AR8216_PHY) += ar8216.o
++obj-$(CONFIG_RTL8306_PHY) += rtl8306.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
+ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
diff --git a/target/linux/generic/patches-3.3/726-phy_rtl8366.patch b/target/linux/generic/patches-3.3/726-phy_rtl8366.patch
new file mode 100644
index 0000000..622a432
--- /dev/null
+++ b/target/linux/generic/patches-3.3/726-phy_rtl8366.patch
@@ -0,0 +1,46 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -162,6 +162,31 @@ config MDIO_OCTEON
+
+ If in doubt, say Y.
+
++config RTL8366_SMI
++ tristate "Driver for the RTL8366 SMI interface"
++ depends on GENERIC_GPIO
++ ---help---
++ This module implements the SMI interface protocol which is used
++ by some RTL8366 ethernet switch devices via the generic GPIO API.
++
++if RTL8366_SMI
++
++config RTL8366S_PHY
++ tristate "Driver for the Realtek RTL8366S switch"
++ select SWCONFIG
++
++config RTL8366RB_PHY
++ tristate "Driver for the Realtek RTL8366RB switch"
++ select SWCONFIG
++
++config RTL8366S_PHY_DEBUG_FS
++ bool "RTL8366 switch driver DEBUG_FS support"
++ depends on RTL8366S_PHY || RTL8366RB_PHY
++ depends on DEBUG_FS
++ default n
++
++endif # RTL8366_SMI
++
+ endif # PHYLIB
+
+ config MICREL_KS8995MA
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -20,6 +20,9 @@ obj-$(CONFIG_IP17XX_PHY) += ip17xx.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_AR8216_PHY) += ar8216.o
+ obj-$(CONFIG_RTL8306_PHY) += rtl8306.o
++obj-$(CONFIG_RTL8366_SMI) += rtl8366_smi.o
++obj-$(CONFIG_RTL8366S_PHY) += rtl8366s.o
++obj-$(CONFIG_RTL8366RB_PHY) += rtl8366rb.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
+ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
diff --git a/target/linux/generic/patches-3.3/727-phy-rtl8367.patch b/target/linux/generic/patches-3.3/727-phy-rtl8367.patch
new file mode 100644
index 0000000..820bda0
--- /dev/null
+++ b/target/linux/generic/patches-3.3/727-phy-rtl8367.patch
@@ -0,0 +1,23 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -179,6 +179,10 @@ config RTL8366RB_PHY
+ tristate "Driver for the Realtek RTL8366RB switch"
+ select SWCONFIG
+
++config RTL8367_PHY
++ tristate "Driver for the Realtek RTL8367R/M switches"
++ select SWCONFIG
++
+ config RTL8366S_PHY_DEBUG_FS
+ bool "RTL8366 switch driver DEBUG_FS support"
+ depends on RTL8366S_PHY || RTL8366RB_PHY
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -23,6 +23,7 @@ obj-$(CONFIG_RTL8306_PHY) += rtl8306.o
+ obj-$(CONFIG_RTL8366_SMI) += rtl8366_smi.o
+ obj-$(CONFIG_RTL8366S_PHY) += rtl8366s.o
+ obj-$(CONFIG_RTL8366RB_PHY) += rtl8366rb.o
++obj-$(CONFIG_RTL8367_PHY) += rtl8367.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
+ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
diff --git a/target/linux/generic/patches-3.3/750-hostap_txpower.patch b/target/linux/generic/patches-3.3/750-hostap_txpower.patch
new file mode 100644
index 0000000..8e2ec9a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/750-hostap_txpower.patch
@@ -0,0 +1,154 @@
+--- a/drivers/net/wireless/hostap/hostap_ap.c
++++ b/drivers/net/wireless/hostap/hostap_ap.c
+@@ -2340,13 +2340,13 @@ int prism2_ap_get_sta_qual(local_info_t
+ addr[count].sa_family = ARPHRD_ETHER;
+ memcpy(addr[count].sa_data, sta->addr, ETH_ALEN);
+ if (sta->last_rx_silence == 0)
+- qual[count].qual = sta->last_rx_signal < 27 ?
+- 0 : (sta->last_rx_signal - 27) * 92 / 127;
++ qual[count].qual = (sta->last_rx_signal - 156) == 0 ?
++ 0 : (sta->last_rx_signal - 156) * 92 / 64;
+ else
+- qual[count].qual = sta->last_rx_signal -
+- sta->last_rx_silence - 35;
+- qual[count].level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
+- qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
++ qual[count].qual = (sta->last_rx_signal -
++ sta->last_rx_silence) * 92 / 64;
++ qual[count].level = sta->last_rx_signal;
++ qual[count].noise = sta->last_rx_silence;
+ qual[count].updated = sta->last_rx_updated;
+
+ sta->last_rx_updated = IW_QUAL_DBM;
+@@ -2412,13 +2412,13 @@ int prism2_ap_translate_scan(struct net_
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVQUAL;
+ if (sta->last_rx_silence == 0)
+- iwe.u.qual.qual = sta->last_rx_signal < 27 ?
+- 0 : (sta->last_rx_signal - 27) * 92 / 127;
++ iwe.u.qual.qual = (sta->last_rx_signal -156) == 0 ?
++ 0 : (sta->last_rx_signal - 156) * 92 / 64;
+ else
+- iwe.u.qual.qual = sta->last_rx_signal -
+- sta->last_rx_silence - 35;
+- iwe.u.qual.level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
+- iwe.u.qual.noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
++ iwe.u.qual.qual = (sta->last_rx_signal -
++ sta->last_rx_silence) * 92 / 64;
++ iwe.u.qual.level = sta->last_rx_signal;
++ iwe.u.qual.noise = sta->last_rx_silence;
+ iwe.u.qual.updated = sta->last_rx_updated;
+ iwe.len = IW_EV_QUAL_LEN;
+ current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+--- a/drivers/net/wireless/hostap/hostap_config.h
++++ b/drivers/net/wireless/hostap/hostap_config.h
+@@ -45,4 +45,9 @@
+ */
+ /* #define PRISM2_NO_STATION_MODES */
+
++/* Enable TX power setting functions
++ * (min att = -128, max att = 127)
++ */
++#define RAW_TXPOWER_SETTING
++
+ #endif /* HOSTAP_CONFIG_H */
+--- a/drivers/net/wireless/hostap/hostap.h
++++ b/drivers/net/wireless/hostap/hostap.h
+@@ -90,6 +90,7 @@ extern const struct iw_handler_def hosta
+ extern const struct ethtool_ops prism2_ethtool_ops;
+
+ int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
++int hostap_restore_power(struct net_device *dev);
+
+
+ #endif /* HOSTAP_H */
+--- a/drivers/net/wireless/hostap/hostap_hw.c
++++ b/drivers/net/wireless/hostap/hostap_hw.c
+@@ -932,6 +932,7 @@ static int hfa384x_set_rid(struct net_de
+ prism2_hw_reset(dev);
+ }
+
++ hostap_restore_power(dev);
+ return res;
+ }
+
+--- a/drivers/net/wireless/hostap/hostap_info.c
++++ b/drivers/net/wireless/hostap/hostap_info.c
+@@ -434,6 +434,11 @@ static void handle_info_queue_linkstatus
+ }
+
+ /* Get BSSID if we have a valid AP address */
++
++ if ( val == HFA384X_LINKSTATUS_CONNECTED ||
++ val == HFA384X_LINKSTATUS_DISCONNECTED )
++ hostap_restore_power(local->dev);
++
+ if (connected) {
+ netif_carrier_on(local->dev);
+ netif_carrier_on(local->ddev);
+--- a/drivers/net/wireless/hostap/hostap_ioctl.c
++++ b/drivers/net/wireless/hostap/hostap_ioctl.c
+@@ -1478,23 +1478,20 @@ static int prism2_txpower_hfa386x_to_dBm
+ val = 255;
+
+ tmp = val;
+- tmp >>= 2;
+
+- return -12 - tmp;
++ return tmp;
+ }
+
+ static u16 prism2_txpower_dBm_to_hfa386x(int val)
+ {
+ signed char tmp;
+
+- if (val > 20)
+- return 128;
+- else if (val < -43)
++ if (val > 127)
+ return 127;
++ else if (val < -128)
++ return 128;
+
+ tmp = val;
+- tmp = -12 - tmp;
+- tmp <<= 2;
+
+ return (unsigned char) tmp;
+ }
+@@ -4057,3 +4054,35 @@ int hostap_ioctl(struct net_device *dev,
+
+ return ret;
+ }
++
++/* BUG FIX: Restore power setting value when lost due to F/W bug */
++
++int hostap_restore_power(struct net_device *dev)
++{
++ struct hostap_interface *iface = netdev_priv(dev);
++ local_info_t *local = iface->local;
++
++ u16 val;
++ int ret = 0;
++
++ if (local->txpower_type == PRISM2_TXPOWER_OFF) {
++ val = 0xff; /* use all standby and sleep modes */
++ ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
++ HFA386X_CR_A_D_TEST_MODES2,
++ &val, NULL);
++ }
++
++#ifdef RAW_TXPOWER_SETTING
++ if (local->txpower_type == PRISM2_TXPOWER_FIXED) {
++ val = HFA384X_TEST_CFG_BIT_ALC;
++ local->func->cmd(dev, HFA384X_CMDCODE_TEST |
++ (HFA384X_TEST_CFG_BITS << 8), 0, &val, NULL);
++ val = prism2_txpower_dBm_to_hfa386x(local->txpower);
++ ret = (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
++ HFA386X_CR_MANUAL_TX_POWER, &val, NULL));
++ }
++#endif /* RAW_TXPOWER_SETTING */
++ return (ret ? -EOPNOTSUPP : 0);
++}
++
++EXPORT_SYMBOL(hostap_restore_power);
diff --git a/target/linux/generic/patches-3.3/810-pci_disable_common_quirks.patch b/target/linux/generic/patches-3.3/810-pci_disable_common_quirks.patch
new file mode 100644
index 0000000..4802157
--- /dev/null
+++ b/target/linux/generic/patches-3.3/810-pci_disable_common_quirks.patch
@@ -0,0 +1,43 @@
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -51,6 +51,12 @@ config XEN_PCIDEV_FRONTEND
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
+
++config PCI_DISABLE_COMMON_QUIRKS
++ bool "PCI disable common quirks"
++ depends on PCI
++ help
++ If you don't know what to do here, say N.
++
+ config HT_IRQ
+ bool "Interrupts on hypertransport devices"
+ default y
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -105,6 +105,7 @@ static void __devinit quirk_mmio_always_
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_mmio_always_on);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+ /* The Mellanox Tavor device gives false positive parity errors
+ * Mark this device with a broken_parity_status, to allow
+ * PCI scanning code to "skip" this now blacklisted device.
+@@ -1990,7 +1991,9 @@ static void __devinit fixup_rev1_53c810(
+ }
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+ /* Enable 1k I/O space granularity on the Intel P64H2 */
+ static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
+ {
+@@ -2666,6 +2669,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AT
+ quirk_msi_intx_disable_bug);
+
+ #endif /* CONFIG_PCI_MSI */
++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
+
+ /* Allow manual resource allocation for PCI hotplug bridges
+ * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
diff --git a/target/linux/generic/patches-3.3/811-pci_disable_usb_common_quirks.patch b/target/linux/generic/patches-3.3/811-pci_disable_usb_common_quirks.patch
new file mode 100644
index 0000000..c0a478b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/811-pci_disable_usb_common_quirks.patch
@@ -0,0 +1,18 @@
+
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -431,6 +431,8 @@ reset_needed:
+ }
+ EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
++
+ static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
+ {
+ u16 cmd;
+@@ -877,3 +879,5 @@ static void __devinit quirk_usb_early_ha
+ quirk_usb_handoff_xhci(pdev);
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
++
++#endif
diff --git a/target/linux/generic/patches-3.3/820-usb_add_usb_find_device_by_name.patch b/target/linux/generic/patches-3.3/820-usb_add_usb_find_device_by_name.patch
new file mode 100644
index 0000000..ee50ff9
--- /dev/null
+++ b/target/linux/generic/patches-3.3/820-usb_add_usb_find_device_by_name.patch
@@ -0,0 +1,84 @@
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -652,6 +652,71 @@ int __usb_get_extra_descriptor(char *buf
+ }
+ EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor);
+
++static struct usb_device *match_device_name(struct usb_device *dev,
++ const char *name)
++{
++ struct usb_device *ret_dev = NULL;
++ int child;
++
++ dev_dbg(&dev->dev, "check for name %s ...\n", name);
++
++ /* see if this device matches */
++ if (strcmp(dev_name(&dev->dev), name) == 0 ) {
++ dev_dbg(&dev->dev, "matched this device!\n");
++ ret_dev = usb_get_dev(dev);
++ goto exit;
++ }
++
++ /* look through all of the children of this device */
++ for (child = 0; child < dev->maxchild; ++child) {
++ if (dev->children[child]) {
++ usb_lock_device(dev->children[child]);
++ ret_dev = match_device_name(dev->children[child], name);
++ usb_unlock_device(dev->children[child]);
++ if (ret_dev)
++ goto exit;
++ }
++ }
++exit:
++ return ret_dev;
++}
++
++/**
++ * usb_find_device_by_name - find a specific usb device in the system
++ * @name: the name of the device to find
++ *
++ * Returns a pointer to a struct usb_device if a device with the specified
++ * name is currently present in the system. The usage count of the
++ * device will be incremented if a device is found. Make sure to call
++ * usb_put_dev() when the caller is finished with the device.
++ *
++ * If a device with the specified bus id is not found, NULL is returned.
++ */
++struct usb_device *usb_find_device_by_name(const char *name)
++{
++ struct list_head *buslist;
++ struct usb_bus *bus;
++ struct usb_device *dev = NULL;
++
++ mutex_lock(&usb_bus_list_lock);
++ for (buslist = usb_bus_list.next;
++ buslist != &usb_bus_list;
++ buslist = buslist->next) {
++ bus = container_of(buslist, struct usb_bus, bus_list);
++ if (!bus->root_hub)
++ continue;
++ usb_lock_device(bus->root_hub);
++ dev = match_device_name(bus->root_hub, name);
++ usb_unlock_device(bus->root_hub);
++ if (dev)
++ goto exit;
++ }
++exit:
++ mutex_unlock(&usb_bus_list_lock);
++ return dev;
++}
++EXPORT_SYMBOL_GPL(usb_find_device_by_name);
++
+ /**
+ * usb_alloc_coherent - allocate dma-consistent buffer for URB_NO_xxx_DMA_MAP
+ * @dev: device the buffer will be used with
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -531,6 +531,7 @@ extern int usb_lock_device_for_reset(str
+ extern int usb_reset_device(struct usb_device *dev);
+ extern void usb_queue_reset_device(struct usb_interface *dev);
+
++extern struct usb_device *usb_find_device_by_name(const char *name);
+
+ /* USB autosuspend and autoresume */
+ #ifdef CONFIG_USB_SUSPEND
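
[Editor's aside: a hypothetical caller sketch for the usb_find_device_by_name() helper exported above; the module and the device name "1-1" are made up for illustration. The kerneldoc requires the reference taken by the lookup to be dropped with usb_put_dev(), which is what the sketch shows.]

#include <linux/module.h>
#include <linux/usb.h>

static int __init find_by_name_demo_init(void)
{
        /* "1-1" would be the sysfs/bus name of a device, e.g. port 1 of bus 1. */
        struct usb_device *udev = usb_find_device_by_name("1-1");

        if (!udev)
                return -ENODEV;
        dev_info(&udev->dev, "found %04x:%04x\n",
                 le16_to_cpu(udev->descriptor.idVendor),
                 le16_to_cpu(udev->descriptor.idProduct));
        usb_put_dev(udev);      /* drop the reference taken by the lookup */
        return 0;
}

static void __exit find_by_name_demo_exit(void)
{
}

module_init(find_by_name_demo_init);
module_exit(find_by_name_demo_exit);
MODULE_LICENSE("GPL");
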
diff --git a/target/linux/generic/patches-3.3/830-ledtrig_morse.patch b/target/linux/generic/patches-3.3/830-ledtrig_morse.patch
new file mode 100644
index 0000000..3283807
--- /dev/null
+++ b/target/linux/generic/patches-3.3/830-ledtrig_morse.patch
@@ -0,0 +1,28 @@
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -480,4 +480,8 @@ config LEDS_TRIGGER_DEFAULT_ON
+ comment "iptables trigger is under Netfilter config (LED target)"
+ depends on LEDS_TRIGGERS
+
++config LEDS_TRIGGER_MORSE
++ tristate "LED Morse Trigger"
++ depends on LEDS_TRIGGERS
++
+ endif # NEW_LEDS
+--- a/drivers/leds/Makefile
++++ b/drivers/leds/Makefile
+@@ -57,3 +57,4 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) +=
+ obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
+ obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
+ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
++obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o
+--- a/drivers/leds/ledtrig-morse.c
++++ b/drivers/leds/ledtrig-morse.c
+@@ -26,7 +26,6 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/device.h>
+-#include <linux/sysdev.h>
+ #include <linux/timer.h>
+ #include <linux/ctype.h>
+ #include <linux/leds.h>
diff --git a/target/linux/generic/patches-3.3/831-ledtrig_netdev.patch b/target/linux/generic/patches-3.3/831-ledtrig_netdev.patch
new file mode 100644
index 0000000..7f94b1c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/831-ledtrig_netdev.patch
@@ -0,0 +1,51 @@
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -484,4 +484,11 @@ config LEDS_TRIGGER_MORSE
+ tristate "LED Morse Trigger"
+ depends on LEDS_TRIGGERS
+
++config LEDS_TRIGGER_NETDEV
++ tristate "LED Netdev Trigger"
++ depends on NET && LEDS_TRIGGERS
++ help
++ This allows LEDs to be controlled by network device activity.
++ If unsure, say Y.
++
+ endif # NEW_LEDS
+--- a/drivers/leds/Makefile
++++ b/drivers/leds/Makefile
+@@ -58,3 +58,4 @@ obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) +=
+ obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
+ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+ obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o
++obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
+--- a/drivers/leds/ledtrig-netdev.c
++++ b/drivers/leds/ledtrig-netdev.c
+@@ -22,7 +22,6 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/device.h>
+-#include <linux/sysdev.h>
+ #include <linux/netdevice.h>
+ #include <linux/timer.h>
+ #include <linux/ctype.h>
+@@ -307,8 +306,9 @@ done:
+ static void netdev_trig_timer(unsigned long arg)
+ {
+ struct led_netdev_data *trigger_data = (struct led_netdev_data *)arg;
+- const struct net_device_stats *dev_stats;
++ struct rtnl_link_stats64 *dev_stats;
+ unsigned new_activity;
++ struct rtnl_link_stats64 temp;
+
+ write_lock(&trigger_data->lock);
+
+@@ -318,7 +318,7 @@ static void netdev_trig_timer(unsigned l
+ goto no_restart;
+ }
+
+- dev_stats = dev_get_stats(trigger_data->net_dev);
++ dev_stats = dev_get_stats(trigger_data->net_dev, &temp);
+ new_activity =
+ ((trigger_data->mode & MODE_TX) ? dev_stats->tx_packets : 0) +
+ ((trigger_data->mode & MODE_RX) ? dev_stats->rx_packets : 0);
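The last hunk adapts the trigger to the two-argument dev_get_stats(), which fills caller-provided storage and returns 64-bit counters. A minimal sketch of the same call pattern in isolation, assuming only a valid net_device pointer:

#include <linux/netdevice.h>

/* Sketch: read packet counters with the two-argument dev_get_stats(). */
static u64 total_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	struct rtnl_link_stats64 *stats;

	/* dev_get_stats() may fill and return &temp, or return
	 * driver-private 64-bit statistics directly. */
	stats = dev_get_stats(dev, &temp);

	return stats->rx_packets + stats->tx_packets;
}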
diff --git a/target/linux/generic/patches-3.3/832-ledtrig_usbdev.patch b/target/linux/generic/patches-3.3/832-ledtrig_usbdev.patch
new file mode 100644
index 0000000..8933497
--- /dev/null
+++ b/target/linux/generic/patches-3.3/832-ledtrig_usbdev.patch
@@ -0,0 +1,31 @@
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -491,4 +491,11 @@ config LEDS_TRIGGER_NETDEV
+ This allows LEDs to be controlled by network device activity.
+ If unsure, say Y.
+
++config LEDS_TRIGGER_USBDEV
++ tristate "LED USB device Trigger"
++ depends on USB && LEDS_TRIGGERS
++ help
++ This allows LEDs to be controlled by the presence/activity of
++ a USB device. If unsure, say N.
++
+ endif # NEW_LEDS
+--- a/drivers/leds/Makefile
++++ b/drivers/leds/Makefile
+@@ -59,3 +59,4 @@ obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledt
+ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+ obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o
+ obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
++obj-$(CONFIG_LEDS_TRIGGER_USBDEV) += ledtrig-usbdev.o
+--- a/drivers/leds/ledtrig-usbdev.c
++++ b/drivers/leds/ledtrig-usbdev.c
+@@ -24,7 +24,6 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/device.h>
+-#include <linux/sysdev.h>
+ #include <linux/timer.h>
+ #include <linux/ctype.h>
+ #include <linux/slab.h>
diff --git a/target/linux/generic/patches-3.3/833-gpio_buttons.patch b/target/linux/generic/patches-3.3/833-gpio_buttons.patch
new file mode 100644
index 0000000..b741b8c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/833-gpio_buttons.patch
@@ -0,0 +1,30 @@
+--- a/drivers/input/misc/Kconfig
++++ b/drivers/input/misc/Kconfig
+@@ -569,4 +569,20 @@ config INPUT_XEN_KBDDEV_FRONTEND
+ To compile this driver as a module, choose M here: the
+ module will be called xen-kbdfront.
+
++config INPUT_GPIO_BUTTONS
++ tristate "Polled GPIO buttons interface"
++ depends on GENERIC_GPIO
++ select INPUT_POLLDEV
++ help
++ This driver implements support for buttons connected
++ to GPIO pins of various CPUs (and some other chips).
++
++ Say Y here if your device has buttons connected
++ directly to such GPIO pins. Your board-specific
++ setup logic must also provide a platform device,
++ with configuration data saying which GPIOs are used.
++
++ To compile this driver as a module, choose M here: the
++ module will be called gpio-buttons.
++
+ endif
+--- a/drivers/input/misc/Makefile
++++ b/drivers/input/misc/Makefile
+@@ -53,3 +53,4 @@ obj-$(CONFIG_INPUT_WISTRON_BTNS) += wist
+ obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
+ obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
+ obj-$(CONFIG_INPUT_YEALINK) += yealink.o
++obj-$(CONFIG_INPUT_GPIO_BUTTONS) += gpio_buttons.o
diff --git a/target/linux/generic/patches-3.3/835-gpiodev.patch b/target/linux/generic/patches-3.3/835-gpiodev.patch
new file mode 100644
index 0000000..f41d5a6
--- /dev/null
+++ b/target/linux/generic/patches-3.3/835-gpiodev.patch
@@ -0,0 +1,27 @@
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -511,6 +511,14 @@ config NSC_GPIO
+ pc8736x_gpio drivers. If those drivers are built as
+ modules, this one will be too, named nsc_gpio
+
++config GPIO_DEVICE
++ tristate "GPIO device support"
++ depends on GENERIC_GPIO
++ help
++ Say Y to enable Linux GPIO device support. This allows control of
++ GPIO pins using a character device.
++
++
+ config RAW_DRIVER
+ tristate "RAW driver (/dev/raw/rawN)"
+ depends on BLOCK
+--- a/drivers/char/Makefile
++++ b/drivers/char/Makefile
+@@ -47,6 +47,7 @@ obj-$(CONFIG_NWFLASH) += nwflash.o
+ obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
+ obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
+ obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
++obj-$(CONFIG_GPIO_DEVICE) += gpio_dev.o
+ obj-$(CONFIG_GPIO_TB0219) += tb0219.o
+ obj-$(CONFIG_TELCLOCK) += tlclk.o
+
diff --git a/target/linux/generic/patches-3.3/840-rtc7301.patch b/target/linux/generic/patches-3.3/840-rtc7301.patch
new file mode 100644
index 0000000..35dd3b8
--- /dev/null
+++ b/target/linux/generic/patches-3.3/840-rtc7301.patch
@@ -0,0 +1,250 @@
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -719,6 +719,15 @@ config RTC_DRV_NUC900
+ If you say yes here you get support for the RTC subsystem of the
+ NUC910/NUC920 used in embedded systems.
+
++config RTC_DRV_RTC7301
++ tristate "Epson RTC-7301 SF/DG"
++ help
++ If you say Y here you will get support for the
++ Epson RTC-7301 SF/DG RTC chips.
++
++ This driver can also be built as a module. If so, the module
++ will be called rtc-7301.
++
+ comment "on-CPU RTC drivers"
+
+ config RTC_DRV_DAVINCI
+--- a/drivers/rtc/Makefile
++++ b/drivers/rtc/Makefile
+@@ -86,6 +86,7 @@ obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c
+ obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
+ obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
+ obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
++obj-$(CONFIG_RTC_DRV_RTC7301) += rtc-rtc7301.o
+ obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o
+ obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o
+ obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
+--- /dev/null
++++ b/drivers/rtc/rtc-rtc7301.c
+@@ -0,0 +1,219 @@
++/*
++ * Driver for Epson RTC-7301SF/DG
++ *
++ * Copyright (C) 2009 Jose Vasconcellos
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/rtc.h>
++#include <linux/platform_device.h>
++#include <linux/io.h>
++#include <linux/delay.h>
++#include <linux/bcd.h>
++
++#define RTC_NAME "rtc7301"
++#define RTC_VERSION "0.1"
++
++/* Epson RTC-7301 register addresses */
++#define RTC7301_SEC 0x00
++#define RTC7301_SEC10 0x01
++#define RTC7301_MIN 0x02
++#define RTC7301_MIN10 0x03
++#define RTC7301_HOUR 0x04
++#define RTC7301_HOUR10 0x05
++#define RTC7301_WEEKDAY 0x06
++#define RTC7301_DAY 0x07
++#define RTC7301_DAY10 0x08
++#define RTC7301_MON 0x09
++#define RTC7301_MON10 0x0A
++#define RTC7301_YEAR 0x0B
++#define RTC7301_YEAR10 0x0C
++#define RTC7301_YEAR100 0x0D
++#define RTC7301_YEAR1000 0x0E
++#define RTC7301_CTRLREG 0x0F
++
++static uint8_t __iomem *rtc7301_base;
++
++#define read_reg(offset) (readb(rtc7301_base + offset) & 0xf)
++#define write_reg(offset, data) writeb(data, rtc7301_base + (offset))
++
++#define rtc7301_isbusy() (read_reg(RTC7301_CTRLREG) & 1)
++
++static void rtc7301_init_settings(void)
++{
++ int i;
++
++ write_reg(RTC7301_CTRLREG, 2);
++ write_reg(RTC7301_YEAR1000, 2);
++ udelay(122);
++
++ /* bank 1 */
++ write_reg(RTC7301_CTRLREG, 6);
++ for (i=0; i<15; i++)
++ write_reg(i, 0);
++
++ /* bank 2 */
++ write_reg(RTC7301_CTRLREG, 14);
++ for (i=0; i<15; i++)
++ write_reg(i, 0);
++ write_reg(RTC7301_CTRLREG, 0);
++}
++
++static int rtc7301_get_datetime(struct device *dev, struct rtc_time *dt)
++{
++ int cnt;
++ uint8_t buf[16];
++
++ cnt = 0;
++ while (rtc7301_isbusy()) {
++ udelay(244);
++ if (cnt++ > 100) {
++ dev_err(dev, "%s: timeout error %x\n", __func__, rtc7301_base[RTC7301_CTRLREG]);
++ return -EIO;
++ }
++ }
++
++ for (cnt=0; cnt<16; cnt++)
++ buf[cnt] = read_reg(cnt);
++
++ if (buf[RTC7301_SEC10] & 8) {
++ dev_err(dev, "%s: RTC not set\n", __func__);
++ return -EINVAL;
++ }
++
++ memset(dt, 0, sizeof(*dt));
++
++ dt->tm_sec = buf[RTC7301_SEC] + buf[RTC7301_SEC10]*10;
++ dt->tm_min = buf[RTC7301_MIN] + buf[RTC7301_MIN10]*10;
++ dt->tm_hour = buf[RTC7301_HOUR] + buf[RTC7301_HOUR10]*10;
++
++ dt->tm_mday = buf[RTC7301_DAY] + buf[RTC7301_DAY10]*10;
++ dt->tm_mon = buf[RTC7301_MON] + buf[RTC7301_MON10]*10 - 1;
++ dt->tm_year = buf[RTC7301_YEAR] + buf[RTC7301_YEAR10]*10 +
++ buf[RTC7301_YEAR100]*100 +
++ ((buf[RTC7301_YEAR1000] & 3)*1000) - 1900;
++
++ /* the rtc device may contain illegal values on power up
++ * according to the data sheet. make sure they are valid.
++ */
++
++ return rtc_valid_tm(dt);
++}
++
++static int rtc7301_set_datetime(struct device *dev, struct rtc_time *dt)
++{
++ int data;
++
++ data = dt->tm_year + 1900;
++ if (data >= 2100 || data < 1900)
++ return -EINVAL;
++
++ write_reg(RTC7301_CTRLREG, 2);
++ udelay(122);
++
++ data = bin2bcd(dt->tm_sec);
++ write_reg(RTC7301_SEC, data);
++ write_reg(RTC7301_SEC10, (data >> 4));
++
++ data = bin2bcd(dt->tm_min);
++ write_reg(RTC7301_MIN, data);
++ write_reg(RTC7301_MIN10, (data >> 4));
++
++ data = bin2bcd(dt->tm_hour);
++ write_reg(RTC7301_HOUR, data);
++ write_reg(RTC7301_HOUR10, (data >> 4));
++
++ data = bin2bcd(dt->tm_mday);
++ write_reg(RTC7301_DAY, data);
++ write_reg(RTC7301_DAY10, (data >> 4));
++
++ data = bin2bcd(dt->tm_mon + 1);
++ write_reg(RTC7301_MON, data);
++ write_reg(RTC7301_MON10, (data >> 4));
++
++ data = bin2bcd(dt->tm_year % 100);
++ write_reg(RTC7301_YEAR, data);
++ write_reg(RTC7301_YEAR10, (data >> 4));
++ data = bin2bcd((1900 + dt->tm_year) / 100);
++ write_reg(RTC7301_YEAR100, data);
++
++ data = bin2bcd(dt->tm_wday);
++ write_reg(RTC7301_WEEKDAY, data);
++
++ write_reg(RTC7301_CTRLREG, 0);
++
++ return 0;
++}
++
++static const struct rtc_class_ops rtc7301_rtc_ops = {
++ .read_time = rtc7301_get_datetime,
++ .set_time = rtc7301_set_datetime,
++};
++
++static int __devinit rtc7301_probe(struct platform_device *pdev)
++{
++ struct rtc_device *rtc;
++ struct resource *res;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ return -ENOENT;
++
++ rtc7301_base = ioremap_nocache(res->start, 0x1000 /*res->end - res->start + 1*/);
++ if (!rtc7301_base)
++ return -EINVAL;
++
++ rtc = rtc_device_register(RTC_NAME, &pdev->dev,
++ &rtc7301_rtc_ops, THIS_MODULE);
++ if (IS_ERR(rtc)) {
++ iounmap(rtc7301_base);
++ return PTR_ERR(rtc);
++ }
++
++ platform_set_drvdata(pdev, rtc);
++
++ rtc7301_init_settings();
++ return 0;
++}
++
++static int __devexit rtc7301_remove(struct platform_device *pdev)
++{
++ struct rtc_device *rtc = platform_get_drvdata(pdev);
++
++ if (rtc)
++ rtc_device_unregister(rtc);
++ if (rtc7301_base)
++ iounmap(rtc7301_base);
++ return 0;
++}
++
++static struct platform_driver rtc7301_driver = {
++ .driver = {
++ .name = RTC_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = rtc7301_probe,
++ .remove = __devexit_p(rtc7301_remove),
++};
++
++static __init int rtc7301_init(void)
++{
++ return platform_driver_register(&rtc7301_driver);
++}
++module_init(rtc7301_init);
++
++static __exit void rtc7301_exit(void)
++{
++ platform_driver_unregister(&rtc7301_driver);
++}
++module_exit(rtc7301_exit);
++
++MODULE_DESCRIPTION("Epson 7301 RTC driver");
++MODULE_AUTHOR("Jose Vasconcellos <jvasco@verizon.net>");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:" RTC_NAME);
++MODULE_VERSION(RTC_VERSION);
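The driver binds purely by platform device name and a single memory resource. A hypothetical board-support sketch for registering it is shown below; the base address 0x1c400000 is a placeholder and must match where the board actually decodes the RTC:

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Hypothetical board glue for the rtc-rtc7301 driver added above. */
static struct resource board_rtc7301_resource = {
	.start	= 0x1c400000,			/* placeholder base address */
	.end	= 0x1c400000 + 0x0fff,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device board_rtc7301_device = {
	.name		= "rtc7301",		/* must match RTC_NAME */
	.id		= -1,
	.resource	= &board_rtc7301_resource,
	.num_resources	= 1,
};

static int __init board_rtc7301_register(void)
{
	return platform_device_register(&board_rtc7301_device);
}
device_initcall(board_rtc7301_register);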
diff --git a/target/linux/generic/patches-3.3/850-glamo_headers.patch b/target/linux/generic/patches-3.3/850-glamo_headers.patch
new file mode 100644
index 0000000..c75e1d6
--- /dev/null
+++ b/target/linux/generic/patches-3.3/850-glamo_headers.patch
@@ -0,0 +1,21 @@
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -127,6 +127,7 @@
+ #define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */
+ #define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */
+ #define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */
++#define FB_ACCEL_GLAMO 50 /* SMedia Glamo */
+ #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */
+ #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */
+ #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -144,6 +144,8 @@ header-y += generic_serial.h
+ header-y += genetlink.h
+ header-y += gfs2_ondisk.h
+ header-y += gigaset_dev.h
++header-y += glamofb.h
++header-y += glamo-engine.h
+ header-y += hdlc.h
+ header-y += hdlcdrv.h
+ header-y += hdreg.h
diff --git a/target/linux/generic/patches-3.3/861-04_spi_gpio_implement_spi_delay.patch b/target/linux/generic/patches-3.3/861-04_spi_gpio_implement_spi_delay.patch
new file mode 100644
index 0000000..7828869
--- /dev/null
+++ b/target/linux/generic/patches-3.3/861-04_spi_gpio_implement_spi_delay.patch
@@ -0,0 +1,58 @@
+Implement the SPI-GPIO delay function for busses that need speed limitation.
+
+--mb
+
+
+
+--- a/drivers/spi/spi-gpio.c
++++ b/drivers/spi/spi-gpio.c
+@@ -22,6 +22,7 @@
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
+ #include <linux/gpio.h>
++#include <linux/delay.h>
+
+ #include <linux/spi/spi.h>
+ #include <linux/spi/spi_bitbang.h>
+@@ -70,6 +71,7 @@ struct spi_gpio {
+ * #define SPI_MOSI_GPIO 120
+ * #define SPI_SCK_GPIO 121
+ * #define SPI_N_CHIPSEL 4
++ * #undef NEED_SPIDELAY
+ * #include "spi-gpio.c"
+ */
+
+@@ -77,6 +79,7 @@ struct spi_gpio {
+ #define DRIVER_NAME "spi_gpio"
+
+ #define GENERIC_BITBANG /* vs tight inlines */
++#define NEED_SPIDELAY 1
+
+ /* all functions referencing these symbols must define pdata */
+ #define SPI_MISO_GPIO ((pdata)->miso)
+@@ -121,12 +124,20 @@ static inline int getmiso(const struct s
+ #undef pdata
+
+ /*
+- * NOTE: this clocks "as fast as we can". It "should" be a function of the
+- * requested device clock. Software overhead means we usually have trouble
+- * reaching even one Mbit/sec (except when we can inline bitops), so for now
+- * we'll just assume we never need additional per-bit slowdowns.
++ * NOTE: to clock "as fast as we can", set spi_device.max_speed_hz
++ * and spi_transfer.speed_hz to 0.
++ * Otherwise this is a function of the requested device clock.
++ * Software overhead means we usually have trouble
++ * reaching even one Mbit/sec (except when we can inline bitops). So on small
++ * embedded devices with fast SPI slaves you usually don't need a delay.
+ */
+-#define spidelay(nsecs) do {} while (0)
++static inline void spidelay(unsigned nsecs)
++{
++#ifdef NEED_SPIDELAY
++ if (unlikely(nsecs))
++ ndelay(nsecs);
++#endif /* NEED_SPIDELAY */
++}
+
+ #include "spi-bitbang-txrx.h"
+
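With this change the per-device clock limit is honoured instead of always clocking flat out: a non-zero max_speed_hz in the board info now translates into ndelay() calls per clock edge, while zero keeps the old full-speed behaviour. A hypothetical board-info sketch:

#include <linux/init.h>
#include <linux/spi/spi.h>

/* Hypothetical slave on a bitbanged spi-gpio bus.  A non-zero
 * max_speed_hz is now enforced through ndelay(); setting it (and
 * spi_transfer.speed_hz) to 0 keeps the old "as fast as we can" mode.
 */
static struct spi_board_info board_spi_slaves[] __initdata = {
	{
		.modalias	= "spidev",	/* placeholder slave driver */
		.bus_num	= 0,		/* matches the spi-gpio master */
		.chip_select	= 0,
		.max_speed_hz	= 100000,	/* limit the bus to ~100 kHz */
		.mode		= SPI_MODE_0,
	},
};

/* Registered from board setup code with:
 *	spi_register_board_info(board_spi_slaves, ARRAY_SIZE(board_spi_slaves));
 */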
diff --git a/target/linux/generic/patches-3.3/862-gpio_spi_driver.patch b/target/linux/generic/patches-3.3/862-gpio_spi_driver.patch
new file mode 100644
index 0000000..6cbea32
--- /dev/null
+++ b/target/linux/generic/patches-3.3/862-gpio_spi_driver.patch
@@ -0,0 +1,373 @@
+THIS CODE IS DEPRECATED.
+
+Please use the new mainline SPI-GPIO driver, as of 2.6.29.
+
+--mb
+
+
+
+---
+ drivers/spi/Kconfig | 9 +
+ drivers/spi/Makefile | 1
+ drivers/spi/spi_gpio_old.c | 251 +++++++++++++++++++++++++++++++++++++++
+ include/linux/spi/spi_gpio_old.h | 73 +++++++++++
+ 4 files changed, 334 insertions(+)
+
+--- /dev/null
++++ b/include/linux/spi/spi_gpio_old.h
+@@ -0,0 +1,73 @@
++/*
++ * spi_gpio interface to platform code
++ *
++ * Copyright (c) 2008 Piotr Skamruk
++ * Copyright (c) 2008 Michael Buesch
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#ifndef _LINUX_SPI_SPI_GPIO
++#define _LINUX_SPI_SPI_GPIO
++
++#include <linux/types.h>
++#include <linux/spi/spi.h>
++
++
++/**
++ * struct spi_gpio_platform_data - Data definitions for a SPI-GPIO device.
++ *
++ * This structure holds information about a GPIO-based SPI device.
++ *
++ * @pin_clk: The GPIO pin number of the CLOCK pin.
++ *
++ * @pin_miso: The GPIO pin number of the MISO pin.
++ *
++ * @pin_mosi: The GPIO pin number of the MOSI pin.
++ *
++ * @pin_cs: The GPIO pin number of the CHIPSELECT pin.
++ *
++ * @cs_activelow: If true, the chip is selected when the CS line is low.
++ *
++ * @no_spi_delay: If true, no delay is done in the lowlevel bitbanging.
++ * Note that doing no delay is not standards compliant,
++ * but it might be needed to speed up transfers on some
++ * slow embedded machines.
++ *
++ * @boardinfo_setup: This callback is called after the
++ * SPI master has been registered, but before the new
++ * SPI slave device is registered.
++ * @boardinfo_setup_data: Data argument passed to boardinfo_setup().
++ */
++struct spi_gpio_platform_data {
++ unsigned int pin_clk;
++ unsigned int pin_miso;
++ unsigned int pin_mosi;
++ unsigned int pin_cs;
++ bool cs_activelow;
++ bool no_spi_delay;
++ int (*boardinfo_setup)(struct spi_board_info *bi,
++ struct spi_master *master,
++ void *data);
++ void *boardinfo_setup_data;
++};
++
++/**
++ * SPI_GPIO_PLATDEV_NAME - The platform device name string.
++ *
++ * The name string that has to be used for platform_device_alloc
++ * when allocating a spi-gpio device.
++ */
++#define SPI_GPIO_PLATDEV_NAME "spi-gpio"
++
++/**
++ * spi_gpio_next_id - Get another platform device ID number.
++ *
++ * This returns the next platform device ID number that has to be used
++ * for platform_device_alloc. The ID is opaque and should not be used for
++ * anything else.
++ */
++int spi_gpio_next_id(void);
++
++#endif /* _LINUX_SPI_SPI_GPIO */
+--- /dev/null
++++ b/drivers/spi/spi_gpio_old.c
+@@ -0,0 +1,251 @@
++/*
++ * Bitbanging SPI bus driver using GPIO API
++ *
++ * Copyright (c) 2008 Piotr Skamruk
++ * Copyright (c) 2008 Michael Buesch
++ *
++ * based on spi_s3c2410_gpio.c
++ * Copyright (c) 2006 Ben Dooks
++ * Copyright (c) 2006 Simtec Electronics
++ * and on i2c-gpio.c
++ * Copyright (C) 2007 Atmel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/spi_bitbang.h>
++#include <linux/spi/spi_gpio_old.h>
++#include <linux/gpio.h>
++#include <asm/atomic.h>
++
++
++struct spi_gpio {
++ struct spi_bitbang bitbang;
++ struct spi_gpio_platform_data *info;
++ struct platform_device *pdev;
++ struct spi_board_info bi;
++};
++
++
++static inline struct spi_gpio *spidev_to_sg(struct spi_device *dev)
++{
++ return dev->controller_data;
++}
++
++static inline void setsck(struct spi_device *dev, int val)
++{
++ struct spi_gpio *sp = spidev_to_sg(dev);
++ gpio_set_value(sp->info->pin_clk, val ? 1 : 0);
++}
++
++static inline void setmosi(struct spi_device *dev, int val)
++{
++ struct spi_gpio *sp = spidev_to_sg(dev);
++ gpio_set_value(sp->info->pin_mosi, val ? 1 : 0);
++}
++
++static inline u32 getmiso(struct spi_device *dev)
++{
++ struct spi_gpio *sp = spidev_to_sg(dev);
++ return gpio_get_value(sp->info->pin_miso) ? 1 : 0;
++}
++
++static inline void do_spidelay(struct spi_device *dev, unsigned nsecs)
++{
++ struct spi_gpio *sp = spidev_to_sg(dev);
++
++ if (!sp->info->no_spi_delay)
++ ndelay(nsecs);
++}
++
++#define spidelay(nsecs) do { \
++ /* Steal the spi_device pointer from our caller. \
++ * The bitbang-API should probably get fixed here... */ \
++ do_spidelay(spi, nsecs); \
++ } while (0)
++
++#define EXPAND_BITBANG_TXRX
++#include "spi-bitbang-txrx.h"
++
++static u32 spi_gpio_txrx_mode0(struct spi_device *spi,
++ unsigned nsecs, u32 word, u8 bits)
++{
++ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
++}
++
++static u32 spi_gpio_txrx_mode1(struct spi_device *spi,
++ unsigned nsecs, u32 word, u8 bits)
++{
++ return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
++}
++
++static u32 spi_gpio_txrx_mode2(struct spi_device *spi,
++ unsigned nsecs, u32 word, u8 bits)
++{
++ return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
++}
++
++static u32 spi_gpio_txrx_mode3(struct spi_device *spi,
++ unsigned nsecs, u32 word, u8 bits)
++{
++ return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
++}
++
++static void spi_gpio_chipselect(struct spi_device *dev, int on)
++{
++ struct spi_gpio *sp = spidev_to_sg(dev);
++
++ if (sp->info->cs_activelow)
++ on = !on;
++ gpio_set_value(sp->info->pin_cs, on ? 1 : 0);
++}
++
++static int spi_gpio_probe(struct platform_device *pdev)
++{
++ struct spi_master *master;
++ struct spi_gpio_platform_data *pdata;
++ struct spi_gpio *sp;
++ struct spi_device *spidev;
++ int err;
++
++ pdata = pdev->dev.platform_data;
++ if (!pdata)
++ return -ENXIO;
++
++ err = -ENOMEM;
++ master = spi_alloc_master(&pdev->dev, sizeof(struct spi_gpio));
++ if (!master)
++ goto err_alloc_master;
++
++ sp = spi_master_get_devdata(master);
++ platform_set_drvdata(pdev, sp);
++ sp->info = pdata;
++
++ err = gpio_request(pdata->pin_clk, "spi_clock");
++ if (err)
++ goto err_request_clk;
++ err = gpio_request(pdata->pin_mosi, "spi_mosi");
++ if (err)
++ goto err_request_mosi;
++ err = gpio_request(pdata->pin_miso, "spi_miso");
++ if (err)
++ goto err_request_miso;
++ err = gpio_request(pdata->pin_cs, "spi_cs");
++ if (err)
++ goto err_request_cs;
++
++ sp->bitbang.master = spi_master_get(master);
++ sp->bitbang.master->bus_num = -1;
++ sp->bitbang.master->num_chipselect = 1;
++ sp->bitbang.chipselect = spi_gpio_chipselect;
++ sp->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_mode0;
++ sp->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_mode1;
++ sp->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_mode2;
++ sp->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_mode3;
++
++ gpio_direction_output(pdata->pin_clk, 0);
++ gpio_direction_output(pdata->pin_mosi, 0);
++ gpio_direction_output(pdata->pin_cs,
++ pdata->cs_activelow ? 1 : 0);
++ gpio_direction_input(pdata->pin_miso);
++
++ err = spi_bitbang_start(&sp->bitbang);
++ if (err)
++ goto err_no_bitbang;
++ err = pdata->boardinfo_setup(&sp->bi, master,
++ pdata->boardinfo_setup_data);
++ if (err)
++ goto err_bi_setup;
++ sp->bi.controller_data = sp;
++ spidev = spi_new_device(master, &sp->bi);
++ if (!spidev)
++ goto err_new_dev;
++
++ return 0;
++
++err_new_dev:
++err_bi_setup:
++ spi_bitbang_stop(&sp->bitbang);
++err_no_bitbang:
++ spi_master_put(sp->bitbang.master);
++ gpio_free(pdata->pin_cs);
++err_request_cs:
++ gpio_free(pdata->pin_miso);
++err_request_miso:
++ gpio_free(pdata->pin_mosi);
++err_request_mosi:
++ gpio_free(pdata->pin_clk);
++err_request_clk:
++ kfree(master);
++
++err_alloc_master:
++ return err;
++}
++
++static int __devexit spi_gpio_remove(struct platform_device *pdev)
++{
++ struct spi_gpio *sp;
++ struct spi_gpio_platform_data *pdata;
++
++ pdata = pdev->dev.platform_data;
++ sp = platform_get_drvdata(pdev);
++
++ gpio_free(pdata->pin_clk);
++ gpio_free(pdata->pin_mosi);
++ gpio_free(pdata->pin_miso);
++ gpio_free(pdata->pin_cs);
++ spi_bitbang_stop(&sp->bitbang);
++ spi_master_put(sp->bitbang.master);
++
++ return 0;
++}
++
++static struct platform_driver spi_gpio_driver = {
++ .driver = {
++ .name = SPI_GPIO_PLATDEV_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = spi_gpio_probe,
++ .remove = __devexit_p(spi_gpio_remove),
++};
++
++int spi_gpio_next_id(void)
++{
++ static atomic_t counter = ATOMIC_INIT(-1);
++
++ return atomic_inc_return(&counter);
++}
++EXPORT_SYMBOL(spi_gpio_next_id);
++
++static int __init spi_gpio_init(void)
++{
++ int err;
++
++ err = platform_driver_register(&spi_gpio_driver);
++ if (err)
++ printk(KERN_ERR "spi-gpio: register failed: %d\n", err);
++
++ return err;
++}
++module_init(spi_gpio_init);
++
++static void __exit spi_gpio_exit(void)
++{
++ platform_driver_unregister(&spi_gpio_driver);
++}
++module_exit(spi_gpio_exit);
++
++MODULE_AUTHOR("Piot Skamruk <piotr.skamruk at gmail.com>");
++MODULE_AUTHOR("Michael Buesch");
++MODULE_DESCRIPTION("Platform independent GPIO bitbanging SPI driver");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -154,6 +154,15 @@ config SPI_GPIO
+ GPIO operations, you should be able to leverage that for better
+ speed with a custom version of this driver; see the source code.
+
++config SPI_GPIO_OLD
++ tristate "Old GPIO API based bitbanging SPI controller (DEPRECATED)"
++ depends on SPI_MASTER && GENERIC_GPIO
++ select SPI_BITBANG
++ help
++ This code is deprecated. Please use the new mainline SPI-GPIO driver.
++
++ If unsure, say N.
++
+ config SPI_IMX
+ tristate "Freescale i.MX SPI controllers"
+ depends on ARCH_MXC
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -29,6 +29,7 @@ obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-li
+ obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
+ obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
+ obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
++obj-$(CONFIG_SPI_GPIO_OLD) += spi_gpio_old.o
+ obj-$(CONFIG_SPI_IMX) += spi-imx.o
+ obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
+ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
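The deprecated bus driver is carried only because gpiommc (next patch) still builds on its platform interface. Outside of gpiommc, a board would register one bus roughly as sketched below; the GPIO numbers and the spidev slave are placeholders:

#include <linux/init.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/spi/spi_gpio_old.h>

/* Hypothetical boardinfo callback: bind the new bus to a spidev slave. */
static int board_spi_setup(struct spi_board_info *bi,
			   struct spi_master *master, void *data)
{
	strlcpy(bi->modalias, "spidev", sizeof(bi->modalias));
	bi->max_speed_hz = 1000000;
	bi->bus_num = master->bus_num;
	return 0;
}

static struct spi_gpio_platform_data board_spi_pdata = {
	.pin_clk	 = 3,			/* placeholder GPIO numbers */
	.pin_miso	 = 4,
	.pin_mosi	 = 5,
	.pin_cs		 = 6,
	.cs_activelow	 = true,
	.boardinfo_setup = board_spi_setup,
};

static int __init board_spi_register(void)
{
	struct platform_device *pdev;
	int err;

	pdev = platform_device_alloc(SPI_GPIO_PLATDEV_NAME, spi_gpio_next_id());
	if (!pdev)
		return -ENOMEM;
	err = platform_device_add_data(pdev, &board_spi_pdata,
				       sizeof(board_spi_pdata));
	if (!err)
		err = platform_device_add(pdev);
	if (err)
		platform_device_put(pdev);
	return err;
}
device_initcall(board_spi_register);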
diff --git a/target/linux/generic/patches-3.3/863-gpiommc.patch b/target/linux/generic/patches-3.3/863-gpiommc.patch
new file mode 100644
index 0000000..9a5f811a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/863-gpiommc.patch
@@ -0,0 +1,844 @@
+--- /dev/null
++++ b/drivers/mmc/host/gpiommc.c
+@@ -0,0 +1,609 @@
++/*
++ * Driver for an MMC/SD card on a bitbanging GPIO SPI bus.
++ * This module hooks up the mmc_spi and spi_gpio modules and also
++ * provides a configfs interface.
++ *
++ * Copyright 2008 Michael Buesch <mb@bu3sch.de>
++ *
++ * Licensed under the GNU/GPL. See COPYING for details.
++ */
++
++#include <linux/module.h>
++#include <linux/mmc/gpiommc.h>
++#include <linux/platform_device.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/spi/spi_gpio_old.h>
++#include <linux/configfs.h>
++#include <linux/gpio.h>
++#include <asm/atomic.h>
++
++
++#define PFX "gpio-mmc: "
++
++
++struct gpiommc_device {
++ struct platform_device *pdev;
++ struct platform_device *spi_pdev;
++ struct spi_board_info boardinfo;
++};
++
++
++MODULE_DESCRIPTION("GPIO based MMC driver");
++MODULE_AUTHOR("Michael Buesch");
++MODULE_LICENSE("GPL");
++
++
++static int gpiommc_boardinfo_setup(struct spi_board_info *bi,
++ struct spi_master *master,
++ void *data)
++{
++ struct gpiommc_device *d = data;
++ struct gpiommc_platform_data *pdata = d->pdev->dev.platform_data;
++
++ /* Bind the SPI master to the MMC-SPI host driver. */
++ strlcpy(bi->modalias, "mmc_spi", sizeof(bi->modalias));
++
++ bi->max_speed_hz = pdata->max_bus_speed;
++ bi->bus_num = master->bus_num;
++ bi->mode = pdata->mode;
++
++ return 0;
++}
++
++static int gpiommc_probe(struct platform_device *pdev)
++{
++ struct gpiommc_platform_data *mmc_pdata = pdev->dev.platform_data;
++ struct spi_gpio_platform_data spi_pdata;
++ struct gpiommc_device *d;
++ int err;
++
++ err = -ENXIO;
++ if (!mmc_pdata)
++ goto error;
++
++#ifdef CONFIG_MMC_SPI_MODULE
++ err = request_module("mmc_spi");
++ if (err) {
++ printk(KERN_WARNING PFX
++ "Failed to request mmc_spi module.\n");
++ }
++#endif /* CONFIG_MMC_SPI_MODULE */
++
++ /* Allocate the GPIO-MMC device */
++ err = -ENOMEM;
++ d = kzalloc(sizeof(*d), GFP_KERNEL);
++ if (!d)
++ goto error;
++ d->pdev = pdev;
++
++ /* Create the SPI-GPIO device */
++ d->spi_pdev = platform_device_alloc(SPI_GPIO_PLATDEV_NAME,
++ spi_gpio_next_id());
++ if (!d->spi_pdev)
++ goto err_free_d;
++
++ memset(&spi_pdata, 0, sizeof(spi_pdata));
++ spi_pdata.pin_clk = mmc_pdata->pins.gpio_clk;
++ spi_pdata.pin_miso = mmc_pdata->pins.gpio_do;
++ spi_pdata.pin_mosi = mmc_pdata->pins.gpio_di;
++ spi_pdata.pin_cs = mmc_pdata->pins.gpio_cs;
++ spi_pdata.cs_activelow = mmc_pdata->pins.cs_activelow;
++ spi_pdata.no_spi_delay = mmc_pdata->no_spi_delay;
++ spi_pdata.boardinfo_setup = gpiommc_boardinfo_setup;
++ spi_pdata.boardinfo_setup_data = d;
++
++ err = platform_device_add_data(d->spi_pdev, &spi_pdata,
++ sizeof(spi_pdata));
++ if (err)
++ goto err_free_pdev;
++ err = platform_device_add(d->spi_pdev);
++ if (err)
++ goto err_free_pdata;
++ platform_set_drvdata(pdev, d);
++
++ printk(KERN_INFO PFX "MMC-Card \"%s\" "
++ "attached to GPIO pins di=%u, do=%u, clk=%u, cs=%u\n",
++ mmc_pdata->name, mmc_pdata->pins.gpio_di,
++ mmc_pdata->pins.gpio_do,
++ mmc_pdata->pins.gpio_clk,
++ mmc_pdata->pins.gpio_cs);
++
++ return 0;
++
++err_free_pdata:
++ kfree(d->spi_pdev->dev.platform_data);
++ d->spi_pdev->dev.platform_data = NULL;
++err_free_pdev:
++ platform_device_put(d->spi_pdev);
++err_free_d:
++ kfree(d);
++error:
++ return err;
++}
++
++static int gpiommc_remove(struct platform_device *pdev)
++{
++ struct gpiommc_device *d = platform_get_drvdata(pdev);
++ struct gpiommc_platform_data *pdata = d->pdev->dev.platform_data;
++
++ platform_device_unregister(d->spi_pdev);
++ printk(KERN_INFO PFX "GPIO based MMC-Card \"%s\" removed\n",
++ pdata->name);
++ platform_device_put(d->spi_pdev);
++
++ return 0;
++}
++
++#ifdef CONFIG_GPIOMMC_CONFIGFS
++
++/* A device that was created through configfs */
++struct gpiommc_configfs_device {
++ struct config_item item;
++ /* The platform device, after registration. */
++ struct platform_device *pdev;
++ /* The configuration */
++ struct gpiommc_platform_data pdata;
++};
++
++#define GPIO_INVALID -1
++
++static inline bool gpiommc_is_registered(struct gpiommc_configfs_device *dev)
++{
++ return (dev->pdev != NULL);
++}
++
++static inline struct gpiommc_configfs_device *ci_to_gpiommc(struct config_item *item)
++{
++ return item ? container_of(item, struct gpiommc_configfs_device, item) : NULL;
++}
++
++static struct configfs_attribute gpiommc_attr_DI = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "gpio_data_in",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_DO = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "gpio_data_out",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_CLK = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "gpio_clock",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_CS = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "gpio_chipselect",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_CS_activelow = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "gpio_chipselect_activelow",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_spimode = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "spi_mode",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_spidelay = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "spi_delay",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_max_bus_speed = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "max_bus_speed",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_register = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "register",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute *gpiommc_config_attrs[] = {
++ &gpiommc_attr_DI,
++ &gpiommc_attr_DO,
++ &gpiommc_attr_CLK,
++ &gpiommc_attr_CS,
++ &gpiommc_attr_CS_activelow,
++ &gpiommc_attr_spimode,
++ &gpiommc_attr_spidelay,
++ &gpiommc_attr_max_bus_speed,
++ &gpiommc_attr_register,
++ NULL,
++};
++
++static ssize_t gpiommc_config_attr_show(struct config_item *item,
++ struct configfs_attribute *attr,
++ char *page)
++{
++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item);
++ ssize_t count = 0;
++ unsigned int gpio;
++ int err = 0;
++
++ if (attr == &gpiommc_attr_DI) {
++ gpio = dev->pdata.pins.gpio_di;
++ if (gpio == GPIO_INVALID)
++ count = snprintf(page, PAGE_SIZE, "not configured\n");
++ else
++ count = snprintf(page, PAGE_SIZE, "%u\n", gpio);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_DO) {
++ gpio = dev->pdata.pins.gpio_do;
++ if (gpio == GPIO_INVALID)
++ count = snprintf(page, PAGE_SIZE, "not configured\n");
++ else
++ count = snprintf(page, PAGE_SIZE, "%u\n", gpio);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CLK) {
++ gpio = dev->pdata.pins.gpio_clk;
++ if (gpio == GPIO_INVALID)
++ count = snprintf(page, PAGE_SIZE, "not configured\n");
++ else
++ count = snprintf(page, PAGE_SIZE, "%u\n", gpio);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CS) {
++ gpio = dev->pdata.pins.gpio_cs;
++ if (gpio == GPIO_INVALID)
++ count = snprintf(page, PAGE_SIZE, "not configured\n");
++ else
++ count = snprintf(page, PAGE_SIZE, "%u\n", gpio);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CS_activelow) {
++ count = snprintf(page, PAGE_SIZE, "%u\n",
++ dev->pdata.pins.cs_activelow);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_spimode) {
++ count = snprintf(page, PAGE_SIZE, "%u\n",
++ dev->pdata.mode);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_spidelay) {
++ count = snprintf(page, PAGE_SIZE, "%u\n",
++ !dev->pdata.no_spi_delay);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_max_bus_speed) {
++ count = snprintf(page, PAGE_SIZE, "%u\n",
++ dev->pdata.max_bus_speed);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_register) {
++ count = snprintf(page, PAGE_SIZE, "%u\n",
++ gpiommc_is_registered(dev));
++ goto out;
++ }
++ WARN_ON(1);
++ err = -ENOSYS;
++out:
++ return err ? err : count;
++}
++
++static int gpiommc_do_register(struct gpiommc_configfs_device *dev,
++ const char *name)
++{
++ int err;
++
++ if (gpiommc_is_registered(dev))
++ return 0;
++
++ if (!gpio_is_valid(dev->pdata.pins.gpio_di) ||
++ !gpio_is_valid(dev->pdata.pins.gpio_do) ||
++ !gpio_is_valid(dev->pdata.pins.gpio_clk) ||
++ !gpio_is_valid(dev->pdata.pins.gpio_cs)) {
++ printk(KERN_ERR PFX
++ "configfs: Invalid GPIO pin number(s)\n");
++ return -EINVAL;
++ }
++
++ strlcpy(dev->pdata.name, name,
++ sizeof(dev->pdata.name));
++
++ dev->pdev = platform_device_alloc(GPIOMMC_PLATDEV_NAME,
++ gpiommc_next_id());
++ if (!dev->pdev)
++ return -ENOMEM;
++ err = platform_device_add_data(dev->pdev, &dev->pdata,
++ sizeof(dev->pdata));
++ if (err) {
++ platform_device_put(dev->pdev);
++ return err;
++ }
++ err = platform_device_add(dev->pdev);
++ if (err) {
++ platform_device_put(dev->pdev);
++ return err;
++ }
++
++ return 0;
++}
++
++static void gpiommc_do_unregister(struct gpiommc_configfs_device *dev)
++{
++ if (!gpiommc_is_registered(dev))
++ return;
++
++ platform_device_unregister(dev->pdev);
++ dev->pdev = NULL;
++}
++
++static ssize_t gpiommc_config_attr_store(struct config_item *item,
++ struct configfs_attribute *attr,
++ const char *page, size_t count)
++{
++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item);
++ int err = -EINVAL;
++ unsigned long data;
++
++ if (attr == &gpiommc_attr_register) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (data == 1)
++ err = gpiommc_do_register(dev, item->ci_name);
++ if (data == 0) {
++ gpiommc_do_unregister(dev);
++ err = 0;
++ }
++ goto out;
++ }
++
++ if (gpiommc_is_registered(dev)) {
++ /* The rest of the config parameters can only be set
++ * as long as the device is not registered, yet. */
++ err = -EBUSY;
++ goto out;
++ }
++
++ if (attr == &gpiommc_attr_DI) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (!gpio_is_valid(data))
++ goto out;
++ dev->pdata.pins.gpio_di = data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_DO) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (!gpio_is_valid(data))
++ goto out;
++ dev->pdata.pins.gpio_do = data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CLK) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (!gpio_is_valid(data))
++ goto out;
++ dev->pdata.pins.gpio_clk = data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CS) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (!gpio_is_valid(data))
++ goto out;
++ dev->pdata.pins.gpio_cs = data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CS_activelow) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (data != 0 && data != 1)
++ goto out;
++ dev->pdata.pins.cs_activelow = data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_spimode) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ switch (data) {
++ case 0:
++ dev->pdata.mode = SPI_MODE_0;
++ break;
++ case 1:
++ dev->pdata.mode = SPI_MODE_1;
++ break;
++ case 2:
++ dev->pdata.mode = SPI_MODE_2;
++ break;
++ case 3:
++ dev->pdata.mode = SPI_MODE_3;
++ break;
++ default:
++ goto out;
++ }
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_spidelay) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (data != 0 && data != 1)
++ goto out;
++ dev->pdata.no_spi_delay = !data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_max_bus_speed) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (data > UINT_MAX)
++ goto out;
++ dev->pdata.max_bus_speed = data;
++ err = 0;
++ goto out;
++ }
++ WARN_ON(1);
++ err = -ENOSYS;
++out:
++ return err ? err : count;
++}
++
++static void gpiommc_config_item_release(struct config_item *item)
++{
++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item);
++
++ kfree(dev);
++}
++
++static struct configfs_item_operations gpiommc_config_item_ops = {
++ .release = gpiommc_config_item_release,
++ .show_attribute = gpiommc_config_attr_show,
++ .store_attribute = gpiommc_config_attr_store,
++};
++
++static struct config_item_type gpiommc_dev_ci_type = {
++ .ct_item_ops = &gpiommc_config_item_ops,
++ .ct_attrs = gpiommc_config_attrs,
++ .ct_owner = THIS_MODULE,
++};
++
++static struct config_item *gpiommc_make_item(struct config_group *group,
++ const char *name)
++{
++ struct gpiommc_configfs_device *dev;
++
++ if (strlen(name) > GPIOMMC_MAX_NAMELEN) {
++ printk(KERN_ERR PFX "configfs: device name too long\n");
++ return NULL;
++ }
++
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ if (!dev)
++ return NULL;
++
++ config_item_init_type_name(&dev->item, name,
++ &gpiommc_dev_ci_type);
++
++ /* Assign default configuration */
++ dev->pdata.pins.gpio_di = GPIO_INVALID;
++ dev->pdata.pins.gpio_do = GPIO_INVALID;
++ dev->pdata.pins.gpio_clk = GPIO_INVALID;
++ dev->pdata.pins.gpio_cs = GPIO_INVALID;
++ dev->pdata.pins.cs_activelow = 1;
++ dev->pdata.mode = SPI_MODE_0;
++ dev->pdata.no_spi_delay = 0;
++ dev->pdata.max_bus_speed = 5000000; /* 5 MHz */
++
++ return &(dev->item);
++}
++
++static void gpiommc_drop_item(struct config_group *group,
++ struct config_item *item)
++{
++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item);
++
++ gpiommc_do_unregister(dev);
++ kfree(dev);
++}
++
++static struct configfs_group_operations gpiommc_ct_group_ops = {
++ .make_item = gpiommc_make_item,
++ .drop_item = gpiommc_drop_item,
++};
++
++static struct config_item_type gpiommc_ci_type = {
++ .ct_group_ops = &gpiommc_ct_group_ops,
++ .ct_owner = THIS_MODULE,
++};
++
++static struct configfs_subsystem gpiommc_subsys = {
++ .su_group = {
++ .cg_item = {
++ .ci_namebuf = GPIOMMC_PLATDEV_NAME,
++ .ci_type = &gpiommc_ci_type,
++ },
++ },
++ .su_mutex = __MUTEX_INITIALIZER(gpiommc_subsys.su_mutex),
++};
++
++#endif /* CONFIG_GPIOMMC_CONFIGFS */
++
++static struct platform_driver gpiommc_plat_driver = {
++ .probe = gpiommc_probe,
++ .remove = gpiommc_remove,
++ .driver = {
++ .name = GPIOMMC_PLATDEV_NAME,
++ .owner = THIS_MODULE,
++ },
++};
++
++int gpiommc_next_id(void)
++{
++ static atomic_t counter = ATOMIC_INIT(-1);
++
++ return atomic_inc_return(&counter);
++}
++EXPORT_SYMBOL(gpiommc_next_id);
++
++static int __init gpiommc_modinit(void)
++{
++ int err;
++
++ err = platform_driver_register(&gpiommc_plat_driver);
++ if (err)
++ return err;
++
++#ifdef CONFIG_GPIOMMC_CONFIGFS
++ config_group_init(&gpiommc_subsys.su_group);
++ err = configfs_register_subsystem(&gpiommc_subsys);
++ if (err) {
++ platform_driver_unregister(&gpiommc_plat_driver);
++ return err;
++ }
++#endif /* CONFIG_GPIOMMC_CONFIGFS */
++
++ return 0;
++}
++module_init(gpiommc_modinit);
++
++static void __exit gpiommc_modexit(void)
++{
++#ifdef CONFIG_GPIOMMC_CONFIGFS
++ configfs_unregister_subsystem(&gpiommc_subsys);
++#endif
++ platform_driver_unregister(&gpiommc_plat_driver);
++}
++module_exit(gpiommc_modexit);
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -474,6 +474,31 @@ config MMC_SDHI
+ This provides support for the SDHI SD/SDIO controller found in
+ SuperH and ARM SH-Mobile SoCs
+
++config GPIOMMC
++ tristate "MMC/SD over GPIO-based SPI"
++ depends on MMC && MMC_SPI && SPI_GPIO_OLD
++ help
++ This driver hooks up the mmc_spi and spi_gpio modules so that
++ MMC/SD cards can be used on a GPIO based bus by bitbanging
++ the SPI protocol in software.
++
++ This driver provides a configfs interface to dynamically create
++ and destroy GPIO-based MMC/SD card devices. It also provides
++ a platform device interface API.
++ See Documentation/gpiommc.txt for details.
++
++ The module will be called gpiommc.
++
++ If unsure, say N.
++
++config GPIOMMC_CONFIGFS
++ bool
++ depends on GPIOMMC && CONFIGFS_FS
++ default y
++ help
++ This option automatically enables configfs support for gpiommc
++ if configfs is available.
++
+ config MMC_CB710
+ tristate "ENE CB710 MMC/SD Interface support"
+ depends on PCI
+--- a/drivers/mmc/host/Makefile
++++ b/drivers/mmc/host/Makefile
+@@ -37,6 +37,7 @@ tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_S
+ obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
+ obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
+ obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
++obj-$(CONFIG_GPIOMMC) += gpiommc.o
+ obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
+ obj-$(CONFIG_MMC_DW) += dw_mmc.o
+ obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
+--- /dev/null
++++ b/include/linux/mmc/gpiommc.h
+@@ -0,0 +1,71 @@
++/*
++ * Device driver for MMC/SD cards driven over a GPIO bus.
++ *
++ * Copyright (c) 2008 Michael Buesch
++ *
++ * Licensed under the GNU/GPL version 2.
++ */
++#ifndef LINUX_GPIOMMC_H_
++#define LINUX_GPIOMMC_H_
++
++#include <linux/types.h>
++
++
++#define GPIOMMC_MAX_NAMELEN 15
++#define GPIOMMC_MAX_NAMELEN_STR __stringify(GPIOMMC_MAX_NAMELEN)
++
++/**
++ * struct gpiommc_pins - Hardware pin assignments
++ *
++ * @gpio_di: The GPIO number of the DATA IN pin
++ * @gpio_do: The GPIO number of the DATA OUT pin
++ * @gpio_clk: The GPIO number of the CLOCK pin
++ * @gpio_cs: The GPIO number of the CHIPSELECT pin
++ * @cs_activelow: If true, the chip is considered selected if @gpio_cs is low.
++ */
++struct gpiommc_pins {
++ unsigned int gpio_di;
++ unsigned int gpio_do;
++ unsigned int gpio_clk;
++ unsigned int gpio_cs;
++ bool cs_activelow;
++};
++
++/**
++ * struct gpiommc_platform_data - Platform data for a MMC-over-SPI-GPIO device.
++ *
++ * @name: The unique name string of the device.
++ * @pins: The hardware pin assignments.
++ * @mode: The hardware mode. This is either SPI_MODE_0,
++ * SPI_MODE_1, SPI_MODE_2 or SPI_MODE_3. See the SPI documentation.
++ * @no_spi_delay: Do not use delays in the lowlevel SPI bitbanging code.
++ * This is not standards compliant, but may be required for some
++ * embedded machines to gain reasonable speed.
++ * @max_bus_speed: The maximum speed of the SPI bus, in Hertz.
++ */
++struct gpiommc_platform_data {
++ char name[GPIOMMC_MAX_NAMELEN + 1];
++ struct gpiommc_pins pins;
++ u8 mode;
++ bool no_spi_delay;
++ unsigned int max_bus_speed;
++};
++
++/**
++ * GPIOMMC_PLATDEV_NAME - The platform device name string.
++ *
++ * The name string that has to be used for platform_device_alloc
++ * when allocating a gpiommc device.
++ */
++#define GPIOMMC_PLATDEV_NAME "gpiommc"
++
++/**
++ * gpiommc_next_id - Get another platform device ID number.
++ *
++ * This returns the next platform device ID number that has to be used
++ * for platform_device_alloc. The ID is opaque and should not be used for
++ * anything else.
++ */
++int gpiommc_next_id(void);
++
++#endif /* LINUX_GPIOMMC_H_ */
+--- /dev/null
++++ b/Documentation/gpiommc.txt
+@@ -0,0 +1,97 @@
++GPIOMMC - Driver for an MMC/SD card on a bitbanging GPIO SPI bus
++================================================================
++
++The gpiommc module hooks up the mmc_spi and spi_gpio modules for running an
++MMC or SD card on GPIO pins.
++
++Two interfaces for registering a new MMC/SD card device are provided:
++A static platform-device based mechanism and a dynamic configfs based interface.
++
++
++Registering devices via platform-device
++=======================================
++
++The platform-device interface is used for registering MMC/SD devices that are
++part of the hardware platform. This is mainly useful for embedded machines
++with MMC/SD devices statically connected to the platform GPIO bus.
++
++The data structures are declared in <linux/mmc/gpiommc.h>.
++
++To register a new device, define an instance of struct gpiommc_platform_data.
++This structure describes how the device is hooked up to the
++GPIO pins and what hardware modes the device supports. See the docbook-style
++documentation in the header file for more information on the struct fields.
++
++Then allocate a new instance of a platform device by doing:
++
++ pdev = platform_device_alloc(GPIOMMC_PLATDEV_NAME, gpiommc_next_id());
++
++This will allocate the platform device data structures and hook the device
++up to the gpiommc driver.
++Then add the gpiommc_platform_data to the platform device.
++
++ err = platform_device_add_data(pdev, pdata, sizeof(struct gpiommc_platform_data));
++
++You may free the local instance of struct gpiommc_platform_data now. (So the
++struct may be allocated on the stack, too).
++Now simply register the platform device.
++
++ err = platform_device_add(pdev);
++
++Done. The gpiommc probe routine will be invoked now and you should see a kernel
++log message for the added device.
++
++
++Registering devices via configfs
++================================
++
++MMC/SD cards connected via GPIO are often a fairly dynamic setup; for example,
++self-made hacks that solder an MMC/SD card to spare GPIO pins of embedded
++hardware are a common situation.
++So a dynamic interface is provided to conveniently handle adding and removing
++devices from userspace, without the need to recompile the kernel.
++
++The "gpiommc" subdirectory at the configfs mountpoint is used for handling
++the dynamic configuration.
++
++To create a new device, it must first be allocated with mkdir.
++The following command will allocate a device named "my_mmc":
++ mkdir /config/gpiommc/my_mmc
++
++There are several configuration files available in the new
++/config/gpiommc/my_mmc/ directory:
++
++gpio_data_in = The SPI data-IN GPIO pin number.
++gpio_data_out = The SPI data-OUT GPIO pin number.
++gpio_clock = The SPI Clock GPIO pin number.
++gpio_chipselect = The SPI Chipselect GPIO pin number.
++gpio_chipselect_activelow = Boolean. If 0, Chipselect is active-HIGH.
++ If 1, Chipselect is active-LOW.
++spi_mode = The SPI data mode. Can be 0-3.
++spi_delay = Enable all delays in the lowlevel bitbanging.
++max_bus_speed = The maximum SPI bus speed. In Hertz.
++
++register = Not a configuration parameter.
++ Used to register the configured card
++ with the kernel.
++
++The device must first get configured and then registered by writing "1" to
++the "register" file.
++The configuration parameters "gpio_data_in", "gpio_data_out", "gpio_clock"
++and "gpio_chipselect" are essential and _must_ be configured before writing
++"1" to the "register" file. The registration will fail, otherwise.
++
++The default values for the other parameters are:
++gpio_chipselect_activelow = 1 (CS active-LOW)
++spi_mode = 0 (SPI_MODE_0)
++spi_delay = 1 (enabled)
++max_bus_speed = 5000000 (5 MHz)
++
++Configuration values cannot be changed after registration. To unregister
++the device, write a "0" to the "register" file. The configuration can be
++changed again after unregistering.
++
++To completely remove the device, simply rmdir the directory
++(/config/gpiommc/my_mmc in this example).
++There's no need to first unregister the device before removing it. That will
++be done automatically.
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -3025,6 +3025,11 @@ L: linuxppc-dev@lists.ozlabs.org
+ S: Odd Fixes
+ F: drivers/tty/hvc/
+
++GPIOMMC DRIVER
++P: Michael Buesch
++M: mb@bu3sch.de
++S: Maintained
++
+ HARDWARE MONITORING
+ M: Jean Delvare <khali@linux-fr.org>
+ M: Guenter Roeck <guenter.roeck@ericsson.com>
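The Documentation/gpiommc.txt hunk above walks through the platform-device path step by step; pulled together, and with placeholder GPIO numbers, the whole registration looks roughly like this:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/mmc/gpiommc.h>

/* Hypothetical example: register one GPIO-driven MMC/SD slot. */
static int __init board_gpiommc_register(void)
{
	struct gpiommc_platform_data pdata = {
		.name		= "my_mmc",
		.pins		= {
			.gpio_di	= 1,	/* placeholder pin numbers */
			.gpio_do	= 2,
			.gpio_clk	= 3,
			.gpio_cs	= 4,
			.cs_activelow	= true,
		},
		.mode		= SPI_MODE_0,
		.max_bus_speed	= 5000000,	/* 5 MHz */
	};
	struct platform_device *pdev;
	int err;

	pdev = platform_device_alloc(GPIOMMC_PLATDEV_NAME, gpiommc_next_id());
	if (!pdev)
		return -ENOMEM;
	err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
	if (!err)
		err = platform_device_add(pdev);
	if (err)
		platform_device_put(pdev);
	return err;
}
device_initcall(board_gpiommc_register);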
diff --git a/target/linux/generic/patches-3.3/864-gpiommc_configfs_locking.patch b/target/linux/generic/patches-3.3/864-gpiommc_configfs_locking.patch
new file mode 100644
index 0000000..d4201eb
--- /dev/null
+++ b/target/linux/generic/patches-3.3/864-gpiommc_configfs_locking.patch
@@ -0,0 +1,58 @@
+The gpiommc configfs context structure needs locking, as configfs
+does not lock access between files.
+
+--- a/drivers/mmc/host/gpiommc.c
++++ b/drivers/mmc/host/gpiommc.c
+@@ -144,6 +144,8 @@ struct gpiommc_configfs_device {
+ struct platform_device *pdev;
+ /* The configuration */
+ struct gpiommc_platform_data pdata;
++ /* Mutex to protect this structure */
++ struct mutex mutex;
+ };
+
+ #define GPIO_INVALID -1
+@@ -234,6 +236,8 @@ static ssize_t gpiommc_config_attr_show(
+ unsigned int gpio;
+ int err = 0;
+
++ mutex_lock(&dev->mutex);
++
+ if (attr == &gpiommc_attr_DI) {
+ gpio = dev->pdata.pins.gpio_di;
+ if (gpio == GPIO_INVALID)
+@@ -294,6 +298,8 @@ static ssize_t gpiommc_config_attr_show(
+ WARN_ON(1);
+ err = -ENOSYS;
+ out:
++ mutex_unlock(&dev->mutex);
++
+ return err ? err : count;
+ }
+
+@@ -353,6 +359,8 @@ static ssize_t gpiommc_config_attr_store
+ int err = -EINVAL;
+ unsigned long data;
+
++ mutex_lock(&dev->mutex);
++
+ if (attr == &gpiommc_attr_register) {
+ err = strict_strtoul(page, 10, &data);
+ if (err)
+@@ -478,6 +486,8 @@ static ssize_t gpiommc_config_attr_store
+ WARN_ON(1);
+ err = -ENOSYS;
+ out:
++ mutex_unlock(&dev->mutex);
++
+ return err ? err : count;
+ }
+
+@@ -514,6 +524,7 @@ static struct config_item *gpiommc_make_
+ if (!dev)
+ return NULL;
+
++ mutex_init(&dev->mutex);
+ config_item_init_type_name(&dev->item, name,
+ &gpiommc_dev_ci_type);
+
diff --git a/target/linux/generic/patches-3.3/870-hifn795x_byteswap.patch b/target/linux/generic/patches-3.3/870-hifn795x_byteswap.patch
new file mode 100644
index 0000000..3a37c95
--- /dev/null
+++ b/target/linux/generic/patches-3.3/870-hifn795x_byteswap.patch
@@ -0,0 +1,17 @@
+--- a/drivers/crypto/hifn_795x.c
++++ b/drivers/crypto/hifn_795x.c
+@@ -682,12 +682,12 @@ static inline u32 hifn_read_1(struct hif
+
+ static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
+ {
+- writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
++ writel(val, dev->bar[0] + reg);
+ }
+
+ static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
+ {
+- writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
++ writel(val, dev->bar[1] + reg);
+ }
+
+ static void hifn_wait_puc(struct hifn_device *dev)
diff --git a/target/linux/generic/patches-3.3/900-slab_maxsize.patch b/target/linux/generic/patches-3.3/900-slab_maxsize.patch
new file mode 100644
index 0000000..1c95694
--- /dev/null
+++ b/target/linux/generic/patches-3.3/900-slab_maxsize.patch
@@ -0,0 +1,13 @@
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -127,8 +127,8 @@ unsigned int kmem_cache_size(struct kmem
+ * to do various tricks to work around compiler limitations in order to
+ * ensure proper constant folding.
+ */
+-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
+- (MAX_ORDER + PAGE_SHIFT - 1) : 25)
++#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 17 ? \
++ (MAX_ORDER + PAGE_SHIFT - 1) : 17)
+
+ #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH)
+ #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
diff --git a/target/linux/generic/patches-3.3/910-kobject_uevent.patch b/target/linux/generic/patches-3.3/910-kobject_uevent.patch
new file mode 100644
index 0000000..f15749f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/910-kobject_uevent.patch
@@ -0,0 +1,32 @@
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -49,6 +49,18 @@ static const char *kobject_actions[] = {
+ [KOBJ_OFFLINE] = "offline",
+ };
+
++u64 uevent_next_seqnum(void)
++{
++ u64 seq;
++
++ spin_lock(&sequence_lock);
++ seq = ++uevent_seqnum;
++ spin_unlock(&sequence_lock);
++
++ return seq;
++}
++EXPORT_SYMBOL_GPL(uevent_next_seqnum);
++
+ /**
+ * kobject_action_type - translate action string to numeric type
+ *
+@@ -244,9 +256,7 @@ int kobject_uevent_env(struct kobject *k
+ kobj->state_remove_uevent_sent = 1;
+
+ /* we will send an event, so request a new sequence number */
+- spin_lock(&sequence_lock);
+- seq = ++uevent_seqnum;
+- spin_unlock(&sequence_lock);
++ seq = uevent_next_seqnum();
+ retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
+ if (retval)
+ goto exit;
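uevent_next_seqnum() is exported so other (out-of-tree) code can stamp self-built uevent messages with a kernel sequence number. A sketch of the intended call pattern, assuming a prototype for the function is made visible somewhere (this hunk only adds the definition and the export):

#include <linux/kobject.h>

/* Sketch: stamp a custom-built uevent environment with a fresh
 * kernel sequence number, the way the in-kernel sender does.
 */
static int add_seqnum_var(struct kobj_uevent_env *env)
{
	u64 seq = uevent_next_seqnum();

	return add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
}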
diff --git a/target/linux/generic/patches-3.3/911-kobject_add_broadcast_uevent.patch b/target/linux/generic/patches-3.3/911-kobject_add_broadcast_uevent.patch
new file mode 100644
index 0000000..5573266
--- /dev/null
+++ b/target/linux/generic/patches-3.3/911-kobject_add_broadcast_uevent.patch
@@ -0,0 +1,85 @@
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -31,6 +31,8 @@
+ #define UEVENT_NUM_ENVP 32 /* number of env pointers */
+ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
+
++struct sk_buff;
++
+ /* path to the userspace helper executed on an event */
+ extern char uevent_helper[];
+
+@@ -213,6 +215,10 @@ int add_uevent_var(struct kobj_uevent_en
+
+ int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type);
++
++int broadcast_uevent(struct sk_buff *skb, __u32 pid, __u32 group,
++ gfp_t allocation);
++
+ #else
+ static inline int kobject_uevent(struct kobject *kobj,
+ enum kobject_action action)
+@@ -229,6 +235,16 @@ int add_uevent_var(struct kobj_uevent_en
+ static inline int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type)
+ { return -EINVAL; }
++
++void kfree_skb(struct sk_buff *);
++
++static inline int broadcast_uevent(struct sk_buff *skb, __u32 pid, __u32 group,
++ gfp_t allocation)
++{
++ kfree_skb(skb);
++ return 0;
++}
++
+ #endif
+
+ #endif /* _KOBJECT_H_ */
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -380,6 +380,43 @@ int add_uevent_var(struct kobj_uevent_en
+ EXPORT_SYMBOL_GPL(add_uevent_var);
+
+ #if defined(CONFIG_NET)
++int broadcast_uevent(struct sk_buff *skb, __u32 pid, __u32 group,
++ gfp_t allocation)
++{
++ struct uevent_sock *ue_sk;
++ int err = 0;
++
++ /* send netlink message */
++ mutex_lock(&uevent_sock_mutex);
++ list_for_each_entry(ue_sk, &uevent_sock_list, list) {
++ struct sock *uevent_sock = ue_sk->sk;
++ struct sk_buff *skb2;
++
++ skb2 = skb_clone(skb, allocation);
++ if (!skb2)
++ break;
++
++ err = netlink_broadcast(uevent_sock, skb2, pid, group,
++ allocation);
++ if (err)
++ break;
++ }
++ mutex_unlock(&uevent_sock_mutex);
++
++ kfree_skb(skb);
++ return err;
++}
++#else
++int broadcast_uevent(struct sk_buff *skb, __u32 pid, __u32 group,
++ gfp_t allocation)
++{
++ kfree_skb(skb);
++ return 0;
++}
++#endif
++EXPORT_SYMBOL_GPL(broadcast_uevent);
++
++#if defined(CONFIG_NET)
+ static int uevent_net_init(struct net *net)
+ {
+ struct uevent_sock *ue_sk;
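broadcast_uevent() lets kernel code push an already-built netlink skb out to all uevent listeners; the function consumes the skb whether or not the send succeeds. A sketch of a caller, assuming the payload has been formatted beforehand and that multicast group 1 (the hotplug/udev group) is the target:

#include <linux/kobject.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>

/* Sketch: hand a pre-built netlink message to every uevent listener. */
static int send_custom_event(struct sk_buff *skb)
{
	return broadcast_uevent(skb, 0, 1, GFP_KERNEL);
}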
diff --git a/target/linux/generic/patches-3.3/920-unable_to_open_console.patch b/target/linux/generic/patches-3.3/920-unable_to_open_console.patch
new file mode 100644
index 0000000..f08a59f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/920-unable_to_open_console.patch
@@ -0,0 +1,11 @@
+--- a/init/main.c
++++ b/init/main.c
+@@ -815,7 +815,7 @@ static int __init kernel_init(void * unu
+
+ /* Open the /dev/console on the rootfs, this should never fail */
+ if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+- printk(KERN_WARNING "Warning: unable to open an initial console.\n");
++ printk(KERN_WARNING "Please be patient, while OpenWrt loads ...\n");
+
+ (void) sys_dup(0);
+ (void) sys_dup(0);
diff --git a/target/linux/generic/patches-3.3/921-use_preinit_as_init.patch b/target/linux/generic/patches-3.3/921-use_preinit_as_init.patch
new file mode 100644
index 0000000..395cb6d
--- /dev/null
+++ b/target/linux/generic/patches-3.3/921-use_preinit_as_init.patch
@@ -0,0 +1,14 @@
+--- a/init/main.c
++++ b/init/main.c
+@@ -777,10 +777,7 @@ static noinline int init_post(void)
+ printk(KERN_WARNING "Failed to execute %s. Attempting "
+ "defaults...\n", execute_command);
+ }
+- run_init_process("/sbin/init");
+- run_init_process("/etc/init");
+- run_init_process("/bin/init");
+- run_init_process("/bin/sh");
++ run_init_process("/etc/preinit");
+
+ panic("No init found. Try passing init= option to kernel. "
+ "See Linux Documentation/init.txt for guidance.");
diff --git a/target/linux/generic/patches-3.3/930-crashlog.patch b/target/linux/generic/patches-3.3/930-crashlog.patch
new file mode 100644
index 0000000..cc18714
--- /dev/null
+++ b/target/linux/generic/patches-3.3/930-crashlog.patch
@@ -0,0 +1,242 @@
+--- /dev/null
++++ b/include/linux/crashlog.h
+@@ -0,0 +1,12 @@
++#ifndef __CRASHLOG_H
++#define __CRASHLOG_H
++
++#ifdef CONFIG_CRASHLOG
++void __init crashlog_init_mem(struct bootmem_data *bdata);
++#else
++static inline void crashlog_init_mem(struct bootmem_data *bdata)
++{
++}
++#endif
++
++#endif
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -926,6 +926,10 @@ config RELAY
+
+ If unsure, say N.
+
++config CRASHLOG
++ bool "Crash logging"
++ depends on !NO_BOOTMEM && !HAVE_MEMBLOCK
++
+ config BLK_DEV_INITRD
+ bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
+ depends on BROKEN || !FRV
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -107,6 +107,7 @@ obj-$(CONFIG_USER_RETURN_NOTIFIER) += us
+ obj-$(CONFIG_PADATA) += padata.o
+ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
++obj-$(CONFIG_CRASHLOG) += crashlog.o
+
+ $(obj)/configs.o: $(obj)/config_data.h
+
+--- /dev/null
++++ b/kernel/crashlog.c
+@@ -0,0 +1,171 @@
++/*
++ * Crash information logger
++ * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
++ *
++ * Based on ramoops.c
++ * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/bootmem.h>
++#include <linux/debugfs.h>
++#include <linux/crashlog.h>
++#include <linux/kmsg_dump.h>
++#include <linux/module.h>
++#include <linux/pfn.h>
++#include <asm/io.h>
++
++#define CRASHLOG_PAGES 4
++#define CRASHLOG_SIZE (CRASHLOG_PAGES * PAGE_SIZE)
++#define CRASHLOG_MAGIC 0xa1eedead
++
++/*
++ * Start the log at 1M before the end of RAM, as some boot loaders like
++ * to use the end of the RAM for stack usage and other things.
++ * If this fails, fall back to using the last part.
++ */
++#define CRASHLOG_OFFSET (1024 * 1024)
++
++struct crashlog_data {
++ u32 magic;
++ u32 len;
++ u8 data[];
++};
++
++static struct debugfs_blob_wrapper crashlog_blob;
++static unsigned long crashlog_addr = 0;
++static struct crashlog_data *crashlog_buf;
++static struct kmsg_dumper dump;
++static bool first = true;
++
++extern struct list_head *crashlog_modules;
++
++void __init crashlog_init_mem(bootmem_data_t *bdata)
++{
++ unsigned long addr;
++
++ if (crashlog_addr)
++ return;
++
++ addr = PFN_PHYS(bdata->node_low_pfn) - CRASHLOG_OFFSET;
++ if (reserve_bootmem(addr, CRASHLOG_SIZE, BOOTMEM_EXCLUSIVE) < 0) {
++ printk("Crashlog failed to allocate RAM at address 0x%lx\n", addr);
++ bdata->node_low_pfn -= CRASHLOG_PAGES;
++ addr = PFN_PHYS(bdata->node_low_pfn);
++ }
++ crashlog_addr = addr;
++}
++
++static void __init crashlog_copy(void)
++{
++ if (crashlog_buf->magic != CRASHLOG_MAGIC)
++ return;
++
++ if (!crashlog_buf->len || crashlog_buf->len >
++ CRASHLOG_SIZE - sizeof(*crashlog_buf))
++ return;
++
++ crashlog_blob.size = crashlog_buf->len;
++ crashlog_blob.data = kmemdup(crashlog_buf->data,
++ crashlog_buf->len, GFP_KERNEL);
++
++ debugfs_create_blob("crashlog", 0700, NULL, &crashlog_blob);
++}
++
++static int get_maxlen(void)
++{
++ return CRASHLOG_SIZE - sizeof(*crashlog_buf) - crashlog_buf->len;
++}
++
++static void crashlog_printf(const char *fmt, ...)
++{
++ va_list args;
++ int len = get_maxlen();
++
++ if (!len)
++ return;
++
++ va_start(args, fmt);
++ crashlog_buf->len += vsnprintf(
++ &crashlog_buf->data[crashlog_buf->len],
++ len, fmt, args);
++ va_end(args);
++}
++
++static void crashlog_do_dump(struct kmsg_dumper *dumper,
++ enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
++ const char *s2, unsigned long l2)
++{
++ unsigned long s1_start, s2_start;
++ unsigned long l1_cpy, l2_cpy;
++ struct timeval tv;
++ struct module *m;
++ char *buf;
++ int len;
++
++ if (!first)
++ crashlog_printf("\n===================================\n");
++
++ do_gettimeofday(&tv);
++ crashlog_printf("Time: %lu.%lu\n",
++ (long)tv.tv_sec, (long)tv.tv_usec);
++
++ if (first) {
++ crashlog_printf("Modules:");
++ list_for_each_entry(m, crashlog_modules, list) {
++			crashlog_printf("\t%s@%p+%x %p+%x", m->name,
++				m->module_core, m->core_size,
++				m->module_init, m->init_size);
++ }
++ crashlog_printf("\n");
++ first = false;
++ }
++
++ buf = (char *)&crashlog_buf->data[crashlog_buf->len];
++ len = get_maxlen();
++
++ l2_cpy = min(l2, (unsigned long)len);
++ l1_cpy = min(l1, (unsigned long)len - l2_cpy);
++
++ s2_start = l2 - l2_cpy;
++ s1_start = l1 - l1_cpy;
++
++ memcpy(buf, s1 + s1_start, l1_cpy);
++ memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy);
++ crashlog_buf->len += l1_cpy + l2_cpy;
++}
++
++
++int __init crashlog_init_fs(void)
++{
++ if (!crashlog_addr)
++ return -ENOMEM;
++
++ crashlog_buf = ioremap(crashlog_addr, CRASHLOG_SIZE);
++
++ crashlog_copy();
++
++ crashlog_buf->magic = CRASHLOG_MAGIC;
++ crashlog_buf->len = 0;
++
++ dump.dump = crashlog_do_dump;
++ kmsg_dump_register(&dump);
++
++ return 0;
++}
++module_init(crashlog_init_fs);
+--- a/mm/bootmem.c
++++ b/mm/bootmem.c
+@@ -15,6 +15,7 @@
+ #include <linux/export.h>
+ #include <linux/kmemleak.h>
+ #include <linux/range.h>
++#include <linux/crashlog.h>
+ #include <linux/memblock.h>
+
+ #include <asm/bug.h>
+@@ -177,6 +178,7 @@ static unsigned long __init free_all_boo
+ if (!bdata->node_bootmem_map)
+ return 0;
+
++ crashlog_init_mem(bdata);
+ start = bdata->node_min_pfn;
+ end = bdata->node_low_pfn;
+
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -101,6 +101,9 @@ static LIST_HEAD(modules);
+ #ifdef CONFIG_KGDB_KDB
+ struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
+ #endif /* CONFIG_KGDB_KDB */
++#ifdef CONFIG_CRASHLOG
++struct list_head *crashlog_modules = &modules;
++#endif
+
+
+ /* Block module loading/unloading? */
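
The crashlog patch above stashes the last kernel messages in a reserved RAM area and, after the next boot, re-exports whatever it finds there as a debugfs blob named "crashlog". A hypothetical userspace reader (assuming debugfs is mounted at /sys/kernel/debug) could be as simple as:

/* Illustrative userspace sketch: dump the blob created by
 * debugfs_create_blob("crashlog", ...) in the patch above. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/crashlog", "r");
	char buf[4096];
	size_t n;

	if (!f) {
		perror("open /sys/kernel/debug/crashlog");
		return EXIT_FAILURE;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return EXIT_SUCCESS;
}
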
diff --git a/target/linux/generic/patches-3.3/940-ocf_kbuild_integration.patch b/target/linux/generic/patches-3.3/940-ocf_kbuild_integration.patch
new file mode 100644
index 0000000..55a2bed
--- /dev/null
+++ b/target/linux/generic/patches-3.3/940-ocf_kbuild_integration.patch
@@ -0,0 +1,20 @@
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -961,3 +961,6 @@ config CRYPTO_USER_API_SKCIPHER
+ source "drivers/crypto/Kconfig"
+
+ endif # if CRYPTO
++
++source "crypto/ocf/Kconfig"
++
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -91,6 +91,8 @@ obj-$(CONFIG_CRYPTO_USER_API) += af_alg.
+ obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
+ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
+
++obj-$(CONFIG_OCF_OCF) += ocf/
++
+ #
+ # generic algorithms and the async_tx api
+ #
diff --git a/target/linux/generic/patches-3.3/941-ocf_20110720.patch b/target/linux/generic/patches-3.3/941-ocf_20110720.patch
new file mode 100644
index 0000000..5ff399e
--- /dev/null
+++ b/target/linux/generic/patches-3.3/941-ocf_20110720.patch
@@ -0,0 +1,133 @@
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -430,6 +430,7 @@ struct task_struct *find_task_by_vpid(pi
+ {
+ return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
+ }
++EXPORT_SYMBOL(find_task_by_vpid);
+
+ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
+ {
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -130,6 +130,9 @@
+ * void add_interrupt_randomness(int irq);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
++ * void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
++ * int random_input_wait(void);
++ *
+ * add_input_randomness() uses the input layer interrupt timing, as well as
+ * the event type information from the hardware.
+ *
+@@ -147,6 +150,13 @@
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
+ *
++ * random_input_words() just provides a raw block of entropy to the input
++ * pool, such as from a hardware entropy generator.
++ *
++ * random_input_wait() suspends the caller until such time as the
++ * entropy pool falls below the write threshold, and returns a count of how
++ * much entropy (in bits) is needed to sustain the pool.
++ *
+ * All of these routines try to estimate how many bits of randomness a
+ * particular randomness source. They do this by keeping track of the
+ * first and second order deltas of the event timings.
+@@ -726,6 +736,63 @@ void add_disk_randomness(struct gendisk
+ }
+ #endif
+
++/*
++ * random_input_words - add bulk entropy to pool
++ *
++ * @buf: buffer to add
++ * @wordcount: number of __u32 words to add
++ * @ent_count: total amount of entropy (in bits) to credit
++ *
++ * this provides bulk input of entropy to the input pool
++ *
++ */
++void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
++{
++ mix_pool_bytes(&input_pool, buf, wordcount*4);
++
++ credit_entropy_bits(&input_pool, ent_count);
++
++ DEBUG_ENT("crediting %d bits => %d\n",
++ ent_count, input_pool.entropy_count);
++ /*
++ * Wake up waiting processes if we have enough
++ * entropy.
++ */
++ if (input_pool.entropy_count >= random_read_wakeup_thresh)
++ wake_up_interruptible(&random_read_wait);
++}
++EXPORT_SYMBOL(random_input_words);
++
++/*
++ * random_input_wait - wait until random needs entropy
++ *
++ * this function sleeps until the /dev/random subsystem actually
++ * needs more entropy, and then return the amount of entropy
++ * that it would be nice to have added to the system.
++ */
++int random_input_wait(void)
++{
++ int count;
++
++ wait_event_interruptible(random_write_wait,
++ input_pool.entropy_count < random_write_wakeup_thresh);
++
++ count = random_write_wakeup_thresh - input_pool.entropy_count;
++
++ /* likely we got woken up due to a signal */
++ if (count <= 0) count = random_read_wakeup_thresh;
++
++ DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n",
++ count,
++ input_pool.entropy_count, random_write_wakeup_thresh);
++
++ return count;
++}
++EXPORT_SYMBOL(random_input_wait);
++
++
++#define EXTRACT_SIZE 10
++
+ /*********************************************************************
+ *
+ * Entropy extraction routines
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -142,6 +142,7 @@ SYSCALL_DEFINE1(dup, unsigned int, filde
+ }
+ return ret;
+ }
++EXPORT_SYMBOL(sys_dup);
+
+ #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
+
+--- a/include/linux/miscdevice.h
++++ b/include/linux/miscdevice.h
+@@ -19,6 +19,7 @@
+ #define APOLLO_MOUSE_MINOR 7
+ #define PC110PAD_MINOR 9
+ /*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
++#define CRYPTODEV_MINOR 70 /* /dev/crypto */
+ #define WATCHDOG_MINOR 130 /* Watchdog timer */
+ #define TEMP_MINOR 131 /* Temperature Sensor */
+ #define RTC_MINOR 135
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -54,6 +54,10 @@ extern void add_input_randomness(unsigne
+ unsigned int value);
+ extern void add_interrupt_randomness(int irq);
+
++extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count);
++extern int random_input_wait(void);
++#define HAS_RANDOM_INPUT_WAIT 1
++
+ extern void get_random_bytes(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
+
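
The random_input_words()/random_input_wait() pair added above is intended to let OCF-style drivers feed hardware RNG output into the /dev/random input pool. A hedged sketch of the calling pattern follows; read_hw_rng() is a hypothetical stand-in for a real driver callback and is not part of the patch.

/* Illustrative only: a kernel thread that blocks until /dev/random
 * wants entropy, then credits words read from a hardware RNG. */
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/random.h>

extern int read_hw_rng(__u32 *buf, size_t words);	/* hypothetical */

static int feed_entropy(void *unused)
{
	__u32 buf[32];

	while (!kthread_should_stop()) {
		int bits = random_input_wait();		/* bits the pool wants */
		int words = min(DIV_ROUND_UP(bits, 32), 32);

		if (read_hw_rng(buf, words) == 0)
			random_input_words(buf, words, words * 32);
	}
	return 0;
}
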
diff --git a/target/linux/generic/patches-3.3/950-vm_exports.patch b/target/linux/generic/patches-3.3/950-vm_exports.patch
new file mode 100644
index 0000000..f0d9a64
--- /dev/null
+++ b/target/linux/generic/patches-3.3/950-vm_exports.patch
@@ -0,0 +1,117 @@
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2490,6 +2490,16 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
+
+ /* common code */
+
++void shmem_set_file(struct vm_area_struct *vma, struct file *file)
++{
++ if (vma->vm_file)
++ fput(vma->vm_file);
++ vma->vm_file = file;
++ vma->vm_ops = &shmem_vm_ops;
++ vma->vm_flags |= VM_CAN_NONLINEAR;
++}
++EXPORT_SYMBOL_GPL(shmem_set_file);
++
+ /**
+ * shmem_file_setup - get an unlinked file living in tmpfs
+ * @name: name for dentry (to be seen in /proc/<pid>/maps
+@@ -2567,11 +2577,8 @@ int shmem_zero_setup(struct vm_area_stru
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+- if (vma->vm_file)
+- fput(vma->vm_file);
+- vma->vm_file = file;
+- vma->vm_ops = &shmem_vm_ops;
+- vma->vm_flags |= VM_CAN_NONLINEAR;
++ shmem_set_file(vma, file);
++
+ return 0;
+ }
+
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -268,6 +268,7 @@ int expand_files(struct files_struct *fi
+ /* All good, so we try */
+ return expand_fdtable(files, nr);
+ }
++EXPORT_SYMBOL_GPL(expand_files);
+
+ static int count_open_files(struct fdtable *fdt)
+ {
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -500,6 +500,7 @@ struct files_struct *get_files_struct(st
+
+ return files;
+ }
++EXPORT_SYMBOL_GPL(get_files_struct);
+
+ void put_files_struct(struct files_struct *files)
+ {
+@@ -521,6 +522,7 @@ void put_files_struct(struct files_struc
+ rcu_read_unlock();
+ }
+ }
++EXPORT_SYMBOL_GPL(put_files_struct);
+
+ void reset_files_struct(struct files_struct *files)
+ {
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3861,6 +3861,7 @@ int can_nice(const struct task_struct *p
+ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
+ capable(CAP_SYS_NICE));
+ }
++EXPORT_SYMBOL_GPL(can_nice);
+
+ #ifdef __ARCH_WANT_SYS_NICE
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1404,6 +1404,7 @@ unsigned long zap_page_range(struct vm_a
+ tlb_finish_mmu(&tlb, address, end);
+ return end;
+ }
++EXPORT_SYMBOL_GPL(zap_page_range);
+
+ /**
+ * zap_vma_ptes - remove ptes mapping the vma
+@@ -3079,6 +3080,7 @@ static inline int check_stack_guard_page
+ }
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(vmtruncate_range);
+
+ /*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1257,6 +1257,7 @@ void unmap_kernel_range(unsigned long ad
+ vunmap_page_range(addr, end);
+ flush_tlb_kernel_range(addr, end);
+ }
++EXPORT_SYMBOL_GPL(unmap_kernel_range);
+
+ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+ {
+@@ -1394,6 +1395,7 @@ struct vm_struct *get_vm_area(unsigned l
+ return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+ -1, GFP_KERNEL, __builtin_return_address(0));
+ }
++EXPORT_SYMBOL_GPL(get_vm_area);
+
+ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
+ void *caller)
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -872,6 +872,7 @@ extern bool skip_free_areas_node(unsigne
+
+ int shmem_lock(struct file *file, int lock, struct user_struct *user);
+ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
++void shmem_set_file(struct vm_area_struct *vma, struct file *file);
+ int shmem_zero_setup(struct vm_area_struct *);
+
+ extern int can_do_mlock(void);
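
shmem_set_file() and the other exports above exist so that out-of-tree modules can back a mapping with an unlinked tmpfs file, roughly the way drivers such as ashmem do. A hypothetical mmap handler using the new export might look like this (illustration only, not part of the patch):

/* Illustrative only: back the caller's VMA with an anonymous tmpfs
 * file via the newly exported shmem_set_file(). */
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct file *backing;

	backing = shmem_file_setup("example", vma->vm_end - vma->vm_start,
				   vma->vm_flags);
	if (IS_ERR(backing))
		return PTR_ERR(backing);

	/* shmem_set_file() drops any old vm_file and installs ours */
	shmem_set_file(vma, backing);
	return 0;
}
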
diff --git a/target/linux/generic/patches-3.3/960-decompress_unlzo_fix.patch b/target/linux/generic/patches-3.3/960-decompress_unlzo_fix.patch
new file mode 100644
index 0000000..ee7c118
--- /dev/null
+++ b/target/linux/generic/patches-3.3/960-decompress_unlzo_fix.patch
@@ -0,0 +1,23 @@
+--- /dev/null
++++ b/include/linux/decompress/unlzo_mm.h
+@@ -0,0 +1,10 @@
++#ifndef UNLZO_MM_H
++#define UNLZO_MM_H
++
++#ifdef STATIC
++#define INIT
++#else
++#define INIT __init
++#endif
++
++#endif
+--- a/lib/decompress_unlzo.c
++++ b/lib/decompress_unlzo.c
+@@ -38,6 +38,7 @@
+
+ #include <linux/types.h>
+ #include <linux/lzo.h>
++#include <linux/decompress/unlzo_mm.h>
+ #include <linux/decompress/mm.h>
+
+ #include <linux/compiler.h>
diff --git a/target/linux/generic/patches-3.3/980-update_arm_machtypes.patch b/target/linux/generic/patches-3.3/980-update_arm_machtypes.patch
new file mode 100644
index 0000000..c5a1470
--- /dev/null
+++ b/target/linux/generic/patches-3.3/980-update_arm_machtypes.patch
@@ -0,0 +1,2868 @@
+--- a/arch/arm/tools/mach-types
++++ b/arch/arm/tools/mach-types
+@@ -12,483 +12,2802 @@
+ #
+ # http://www.arm.linux.org.uk/developer/machines/?action=new
+ #
+-# This is a cut-down version of the file; it contains only machines that
+-# are merged into mainline or have been edited in the machine database
+-# within the last 12 months. References to machine_is_NAME() do not count!
+-#
+-# Last update: Tue Dec 6 11:07:38 2011
++# Last update: Wed Jul 6 10:57:10 2011
+ #
+ # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
+ #
+ ebsa110 ARCH_EBSA110 EBSA110 0
+ riscpc ARCH_RPC RISCPC 1
++nexuspci ARCH_NEXUSPCI NEXUSPCI 3
+ ebsa285 ARCH_EBSA285 EBSA285 4
+ netwinder ARCH_NETWINDER NETWINDER 5
+ cats ARCH_CATS CATS 6
++tbox ARCH_TBOX TBOX 7
++co285 ARCH_CO285 CO285 8
++clps7110 ARCH_CLPS7110 CLPS7110 9
++archimedes ARCH_ARC ARCHIMEDES 10
++a5k ARCH_A5K A5K 11
++etoile ARCH_ETOILE ETOILE 12
++lacie_nas ARCH_LACIE_NAS LACIE_NAS 13
++clps7500 ARCH_CLPS7500 CLPS7500 14
+ shark ARCH_SHARK SHARK 15
+ brutus SA1100_BRUTUS BRUTUS 16
+ personal_server ARCH_PERSONAL_SERVER PERSONAL_SERVER 17
++itsy SA1100_ITSY ITSY 18
+ l7200 ARCH_L7200 L7200 19
+ pleb SA1100_PLEB PLEB 20
+ integrator ARCH_INTEGRATOR INTEGRATOR 21
+ h3600 SA1100_H3600 H3600 22
++ixp1200 ARCH_IXP1200 IXP1200 23
+ p720t ARCH_P720T P720T 24
+ assabet SA1100_ASSABET ASSABET 25
++victor SA1100_VICTOR VICTOR 26
+ lart SA1100_LART LART 27
++ranger SA1100_RANGER RANGER 28
+ graphicsclient SA1100_GRAPHICSCLIENT GRAPHICSCLIENT 29
+ xp860 SA1100_XP860 XP860 30
+ cerf SA1100_CERF CERF 31
+ nanoengine SA1100_NANOENGINE NANOENGINE 32
++fpic SA1100_FPIC FPIC 33
++extenex1 SA1100_EXTENEX1 EXTENEX1 34
++sherman SA1100_SHERMAN SHERMAN 35
++accelent_sa SA1100_ACCELENT ACCELENT_SA 36
++accelent_l7200 ARCH_L7200_ACCELENT ACCELENT_L7200 37
++netport SA1100_NETPORT NETPORT 38
++pangolin SA1100_PANGOLIN PANGOLIN 39
++yopy SA1100_YOPY YOPY 40
++coolidge SA1100_COOLIDGE COOLIDGE 41
++huw_webpanel SA1100_HUW_WEBPANEL HUW_WEBPANEL 42
++spotme ARCH_SPOTME SPOTME 43
++freebird ARCH_FREEBIRD FREEBIRD 44
++ti925 ARCH_TI925 TI925 45
++riscstation ARCH_RISCSTATION RISCSTATION 46
++cavy SA1100_CAVY CAVY 47
+ jornada720 SA1100_JORNADA720 JORNADA720 48
++omnimeter SA1100_OMNIMETER OMNIMETER 49
+ edb7211 ARCH_EDB7211 EDB7211 50
++citygo SA1100_CITYGO CITYGO 51
+ pfs168 SA1100_PFS168 PFS168 52
++spot SA1100_SPOT SPOT 53
+ flexanet SA1100_FLEXANET FLEXANET 54
++webpal ARCH_WEBPAL WEBPAL 55
++linpda SA1100_LINPDA LINPDA 56
++anakin ARCH_ANAKIN ANAKIN 57
++mvi SA1100_MVI MVI 58
++jupiter SA1100_JUPITER JUPITER 59
++psionw ARCH_PSIONW PSIONW 60
++aln SA1100_ALN ALN 61
++epxa ARCH_CAMELOT CAMELOT 62
++gds2200 SA1100_GDS2200 GDS2200 63
++netbook SA1100_PSION_SERIES7 PSION_SERIES7 64
++xfile SA1100_XFILE XFILE 65
++accelent_ep9312 ARCH_ACCELENT_EP9312 ACCELENT_EP9312 66
++ic200 ARCH_IC200 IC200 67
++creditlart SA1100_CREDITLART CREDITLART 68
++htm SA1100_HTM HTM 69
++iq80310 ARCH_IQ80310 IQ80310 70
++freebot SA1100_FREEBOT FREEBOT 71
++entel ARCH_ENTEL ENTEL 72
++enp3510 ARCH_ENP3510 ENP3510 73
++trizeps SA1100_TRIZEPS TRIZEPS 74
++nesa SA1100_NESA NESA 75
++venus ARCH_VENUS VENUS 76
++tardis ARCH_TARDIS TARDIS 77
++mercury ARCH_MERCURY MERCURY 78
++empeg SA1100_EMPEG EMPEG 79
++adi_evb ARCH_I80200FCC I80200FCC 80
++itt_cpb SA1100_ITT_CPB ITT_CPB 81
++svc SA1100_SVC SVC 82
++alpha2 SA1100_ALPHA2 ALPHA2 84
++alpha1 SA1100_ALPHA1 ALPHA1 85
++netarm ARCH_NETARM NETARM 86
+ simpad SA1100_SIMPAD SIMPAD 87
++pda1 ARCH_PDA1 PDA1 88
+ lubbock ARCH_LUBBOCK LUBBOCK 89
++aniko ARCH_ANIKO ANIKO 90
+ clep7212 ARCH_CLEP7212 CLEP7212 91
++cs89712 ARCH_CS89712 CS89712 92
++weararm SA1100_WEARARM WEARARM 93
++possio_px SA1100_POSSIO_PX POSSIO_PX 94
++sidearm SA1100_SIDEARM SIDEARM 95
++stork SA1100_STORK STORK 96
+ shannon SA1100_SHANNON SHANNON 97
++ace ARCH_ACE ACE 98
++ballyarm SA1100_BALLYARM BALLYARM 99
++simputer SA1100_SIMPUTER SIMPUTER 100
++nexterm SA1100_NEXTERM NEXTERM 101
++sa1100_elf SA1100_SA1100_ELF SA1100_ELF 102
++gator SA1100_GATOR GATOR 103
++granite ARCH_GRANITE GRANITE 104
+ consus SA1100_CONSUS CONSUS 105
+ aaed2000 ARCH_AAED2000 AAED2000 106
+ cdb89712 ARCH_CDB89712 CDB89712 107
+ graphicsmaster SA1100_GRAPHICSMASTER GRAPHICSMASTER 108
+ adsbitsy SA1100_ADSBITSY ADSBITSY 109
+ pxa_idp ARCH_PXA_IDP PXA_IDP 110
++plce ARCH_PLCE PLCE 111
+ pt_system3 SA1100_PT_SYSTEM3 PT_SYSTEM3 112
++murphy ARCH_MEDALB MEDALB 113
++eagle ARCH_EAGLE EAGLE 114
++dsc21 ARCH_DSC21 DSC21 115
++dsc24 ARCH_DSC24 DSC24 116
++ti5472 ARCH_TI5472 TI5472 117
+ autcpu12 ARCH_AUTCPU12 AUTCPU12 118
++uengine ARCH_UENGINE UENGINE 119
++bluestem SA1100_BLUESTEM BLUESTEM 120
++xingu8 ARCH_XINGU8 XINGU8 121
++bushstb ARCH_BUSHSTB BUSHSTB 122
++epsilon1 SA1100_EPSILON1 EPSILON1 123
++balloon SA1100_BALLOON BALLOON 124
++puppy ARCH_PUPPY PUPPY 125
++elroy SA1100_ELROY ELROY 126
++gms720 ARCH_GMS720 GMS720 127
++s24x ARCH_S24X S24X 128
++jtel_clep7312 ARCH_JTEL_CLEP7312 JTEL_CLEP7312 129
++cx821xx ARCH_CX821XX CX821XX 130
++edb7312 ARCH_EDB7312 EDB7312 131
++bsa1110 SA1100_BSA1110 BSA1110 132
++powerpin ARCH_POWERPIN POWERPIN 133
++openarm ARCH_OPENARM OPENARM 134
++whitechapel SA1100_WHITECHAPEL WHITECHAPEL 135
+ h3100 SA1100_H3100 H3100 136
++h3800 SA1100_H3800 H3800 137
++blue_v1 ARCH_BLUE_V1 BLUE_V1 138
++pxa_cerf ARCH_PXA_CERF PXA_CERF 139
++arm7tevb ARCH_ARM7TEVB ARM7TEVB 140
++d7400 SA1100_D7400 D7400 141
++piranha ARCH_PIRANHA PIRANHA 142
++sbcamelot SA1100_SBCAMELOT SBCAMELOT 143
++kings SA1100_KINGS KINGS 144
++smdk2400 ARCH_SMDK2400 SMDK2400 145
+ collie SA1100_COLLIE COLLIE 146
++idr ARCH_IDR IDR 147
+ badge4 SA1100_BADGE4 BADGE4 148
++webnet ARCH_WEBNET WEBNET 149
++d7300 SA1100_D7300 D7300 150
++cep SA1100_CEP CEP 151
+ fortunet ARCH_FORTUNET FORTUNET 152
++vc547x ARCH_VC547X VC547X 153
++filewalker SA1100_FILEWALKER FILEWALKER 154
++netgateway SA1100_NETGATEWAY NETGATEWAY 155
++symbol2800 SA1100_SYMBOL2800 SYMBOL2800 156
++suns SA1100_SUNS SUNS 157
++frodo SA1100_FRODO FRODO 158
++ms301 SA1100_MACH_TYTE_MS301 MACH_TYTE_MS301 159
+ mx1ads ARCH_MX1ADS MX1ADS 160
+ h7201 ARCH_H7201 H7201 161
+ h7202 ARCH_H7202 H7202 162
++amico ARCH_AMICO AMICO 163
++iam SA1100_IAM IAM 164
++tt530 SA1100_TT530 TT530 165
++sam2400 ARCH_SAM2400 SAM2400 166
++jornada56x SA1100_JORNADA56X JORNADA56X 167
++active SA1100_ACTIVE ACTIVE 168
+ iq80321 ARCH_IQ80321 IQ80321 169
++wid SA1100_WID WID 170
++sabinal ARCH_SABINAL SABINAL 171
++ixp425_matacumbe ARCH_IXP425_MATACUMBE IXP425_MATACUMBE 172
++miniprint SA1100_MINIPRINT MINIPRINT 173
++adm510x ARCH_ADM510X ADM510X 174
++svs200 SA1100_SVS200 SVS200 175
++atg_tcu ARCH_ATG_TCU ATG_TCU 176
++jornada820 SA1100_JORNADA820 JORNADA820 177
++s3c44b0 ARCH_S3C44B0 S3C44B0 178
++margis2 ARCH_MARGIS2 MARGIS2 179
+ ks8695 ARCH_KS8695 KS8695 180
++brh ARCH_BRH BRH 181
++s3c2410 ARCH_S3C2410 S3C2410 182
++possio_px30 ARCH_POSSIO_PX30 POSSIO_PX30 183
++s3c2800 ARCH_S3C2800 S3C2800 184
++fleetwood SA1100_FLEETWOOD FLEETWOOD 185
++omaha ARCH_OMAHA OMAHA 186
++ta7 ARCH_TA7 TA7 187
++nova SA1100_NOVA NOVA 188
++hmk ARCH_HMK HMK 189
+ karo ARCH_KARO KARO 190
++fester SA1100_FESTER FESTER 191
++gpi ARCH_GPI GPI 192
+ smdk2410 ARCH_SMDK2410 SMDK2410 193
++i519 ARCH_I519 I519 194
++nexio SA1100_NEXIO NEXIO 195
++bitbox SA1100_BITBOX BITBOX 196
++g200 SA1100_G200 G200 197
++gill SA1100_GILL GILL 198
++pxa_mercury ARCH_PXA_MERCURY PXA_MERCURY 199
+ ceiva ARCH_CEIVA CEIVA 200
++fret SA1100_FRET FRET 201
++emailphone SA1100_EMAILPHONE EMAILPHONE 202
++h3900 ARCH_H3900 H3900 203
++pxa1 ARCH_PXA1 PXA1 204
++koan369 SA1100_KOAN369 KOAN369 205
++cogent ARCH_COGENT COGENT 206
++esl_simputer ARCH_ESL_SIMPUTER ESL_SIMPUTER 207
++esl_simputer_clr ARCH_ESL_SIMPUTER_CLR ESL_SIMPUTER_CLR 208
++esl_simputer_bw ARCH_ESL_SIMPUTER_BW ESL_SIMPUTER_BW 209
++hhp_cradle ARCH_HHP_CRADLE HHP_CRADLE 210
++he500 ARCH_HE500 HE500 211
++inhandelf2 SA1100_INHANDELF2 INHANDELF2 212
++inhandftip SA1100_INHANDFTIP INHANDFTIP 213
++dnp1110 SA1100_DNP1110 DNP1110 214
++pnp1110 SA1100_PNP1110 PNP1110 215
++csb226 ARCH_CSB226 CSB226 216
++arnold SA1100_ARNOLD ARNOLD 217
+ voiceblue MACH_VOICEBLUE VOICEBLUE 218
++jz8028 ARCH_JZ8028 JZ8028 219
+ h5400 ARCH_H5400 H5400 220
++forte SA1100_FORTE FORTE 221
++acam SA1100_ACAM ACAM 222
++abox SA1100_ABOX ABOX 223
++atmel ARCH_ATMEL ATMEL 224
++sitsang ARCH_SITSANG SITSANG 225
++cpu1110lcdnet SA1100_CPU1110LCDNET CPU1110LCDNET 226
++mpl_vcma9 ARCH_MPL_VCMA9 MPL_VCMA9 227
++opus_a1 ARCH_OPUS_A1 OPUS_A1 228
++daytona ARCH_DAYTONA DAYTONA 229
++killbear SA1100_KILLBEAR KILLBEAR 230
++yoho ARCH_YOHO YOHO 231
++jasper ARCH_JASPER JASPER 232
++dsc25 ARCH_DSC25 DSC25 233
+ omap_innovator MACH_OMAP_INNOVATOR OMAP_INNOVATOR 234
++mnci ARCH_RAMSES RAMSES 235
++s28x ARCH_S28X S28X 236
++mport3 ARCH_MPORT3 MPORT3 237
++pxa_eagle250 ARCH_PXA_EAGLE250 PXA_EAGLE250 238
++pdb ARCH_PDB PDB 239
++blue_2g SA1100_BLUE_2G BLUE_2G 240
++bluearch SA1100_BLUEARCH BLUEARCH 241
+ ixdp2400 ARCH_IXDP2400 IXDP2400 242
+ ixdp2800 ARCH_IXDP2800 IXDP2800 243
++explorer SA1100_EXPLORER EXPLORER 244
+ ixdp425 ARCH_IXDP425 IXDP425 245
++chimp ARCH_CHIMP CHIMP 246
++stork_nest ARCH_STORK_NEST STORK_NEST 247
++stork_egg ARCH_STORK_EGG STORK_EGG 248
++wismo SA1100_WISMO WISMO 249
++ezlinx ARCH_EZLINX EZLINX 250
++at91rm9200 ARCH_AT91RM9200 AT91RM9200 251
++adtech_orion ARCH_ADTECH_ORION ADTECH_ORION 252
++neptune ARCH_NEPTUNE NEPTUNE 253
+ hackkit SA1100_HACKKIT HACKKIT 254
++pxa_wins30 ARCH_PXA_WINS30 PXA_WINS30 255
++lavinna SA1100_LAVINNA LAVINNA 256
++pxa_uengine ARCH_PXA_UENGINE PXA_UENGINE 257
++innokom ARCH_INNOKOM INNOKOM 258
++bms ARCH_BMS BMS 259
+ ixcdp1100 ARCH_IXCDP1100 IXCDP1100 260
++prpmc1100 ARCH_PRPMC1100 PRPMC1100 261
+ at91rm9200dk ARCH_AT91RM9200DK AT91RM9200DK 262
++armstick ARCH_ARMSTICK ARMSTICK 263
++armonie ARCH_ARMONIE ARMONIE 264
++mport1 ARCH_MPORT1 MPORT1 265
++s3c5410 ARCH_S3C5410 S3C5410 266
++zcp320a ARCH_ZCP320A ZCP320A 267
++i_box ARCH_I_BOX I_BOX 268
++stlc1502 ARCH_STLC1502 STLC1502 269
++siren ARCH_SIREN SIREN 270
++greenlake ARCH_GREENLAKE GREENLAKE 271
++argus ARCH_ARGUS ARGUS 272
++combadge SA1100_COMBADGE COMBADGE 273
++rokepxa ARCH_ROKEPXA ROKEPXA 274
+ cintegrator ARCH_CINTEGRATOR CINTEGRATOR 275
++guidea07 ARCH_GUIDEA07 GUIDEA07 276
++tat257 ARCH_TAT257 TAT257 277
++igp2425 ARCH_IGP2425 IGP2425 278
++bluegrama ARCH_BLUEGRAMMA BLUEGRAMMA 279
++ipod ARCH_IPOD IPOD 280
++adsbitsyx ARCH_ADSBITSYX ADSBITSYX 281
++trizeps2 ARCH_TRIZEPS2 TRIZEPS2 282
+ viper ARCH_VIPER VIPER 283
++adsbitsyplus SA1100_ADSBITSYPLUS ADSBITSYPLUS 284
++adsagc SA1100_ADSAGC ADSAGC 285
++stp7312 ARCH_STP7312 STP7312 286
++nx_phnx MACH_NX_PHNX NX_PHNX 287
++wep_ep250 ARCH_WEP_EP250 WEP_EP250 288
++inhandelf3 ARCH_INHANDELF3 INHANDELF3 289
+ adi_coyote ARCH_ADI_COYOTE ADI_COYOTE 290
++iyonix ARCH_IYONIX IYONIX 291
++damicam1 ARCH_DAMICAM_SA1110 DAMICAM_SA1110 292
++meg03 ARCH_MEG03 MEG03 293
++pxa_whitechapel ARCH_PXA_WHITECHAPEL PXA_WHITECHAPEL 294
++nwsc ARCH_NWSC NWSC 295
++nwlarm ARCH_NWLARM NWLARM 296
++ixp425_mguard ARCH_IXP425_MGUARD IXP425_MGUARD 297
++pxa_netdcu4 ARCH_PXA_NETDCU4 PXA_NETDCU4 298
+ ixdp2401 ARCH_IXDP2401 IXDP2401 299
+ ixdp2801 ARCH_IXDP2801 IXDP2801 300
++zodiac ARCH_ZODIAC ZODIAC 301
++armmodul ARCH_ARMMODUL ARMMODUL 302
++ketop SA1100_KETOP KETOP 303
++av7200 ARCH_AV7200 AV7200 304
++arch_ti925 ARCH_ARCH_TI925 ARCH_TI925 305
++acq200 ARCH_ACQ200 ACQ200 306
++pt_dafit SA1100_PT_DAFIT PT_DAFIT 307
++ihba ARCH_IHBA IHBA 308
++quinque ARCH_QUINQUE QUINQUE 309
++nimbraone ARCH_NIMBRAONE NIMBRAONE 310
++nimbra29x ARCH_NIMBRA29X NIMBRA29X 311
++nimbra210 ARCH_NIMBRA210 NIMBRA210 312
++hhp_d95xx ARCH_HHP_D95XX HHP_D95XX 313
++labarm ARCH_LABARM LABARM 314
++m825xx ARCH_M825XX M825XX 315
++m7100 SA1100_M7100 M7100 316
++nipc2 ARCH_NIPC2 NIPC2 317
++fu7202 ARCH_FU7202 FU7202 318
++adsagx ARCH_ADSAGX ADSAGX 319
++pxa_pooh ARCH_PXA_POOH PXA_POOH 320
++bandon ARCH_BANDON BANDON 321
++pcm7210 ARCH_PCM7210 PCM7210 322
++nms9200 ARCH_NMS9200 NMS9200 323
++logodl ARCH_LOGODL LOGODL 324
++m7140 SA1100_M7140 M7140 325
++korebot ARCH_KOREBOT KOREBOT 326
+ iq31244 ARCH_IQ31244 IQ31244 327
++koan393 SA1100_KOAN393 KOAN393 328
++inhandftip3 ARCH_INHANDFTIP3 INHANDFTIP3 329
++gonzo ARCH_GONZO GONZO 330
+ bast ARCH_BAST BAST 331
++scanpass ARCH_SCANPASS SCANPASS 332
++ep7312_pooh ARCH_EP7312_POOH EP7312_POOH 333
++ta7s ARCH_TA7S TA7S 334
++ta7v ARCH_TA7V TA7V 335
++icarus SA1100_ICARUS ICARUS 336
++h1900 ARCH_H1900 H1900 337
++gemini SA1100_GEMINI GEMINI 338
++axim ARCH_AXIM AXIM 339
++audiotron ARCH_AUDIOTRON AUDIOTRON 340
++h2200 ARCH_H2200 H2200 341
++loox600 ARCH_LOOX600 LOOX600 342
++niop ARCH_NIOP NIOP 343
++dm310 ARCH_DM310 DM310 344
++seedpxa_c2 ARCH_SEEDPXA_C2 SEEDPXA_C2 345
++ixp4xx_mguardpci ARCH_IXP4XX_MGUARD_PCI IXP4XX_MGUARD_PCI 346
+ h1940 ARCH_H1940 H1940 347
++scorpio ARCH_SCORPIO SCORPIO 348
++viva ARCH_VIVA VIVA 349
++pxa_xcard ARCH_PXA_XCARD PXA_XCARD 350
++csb335 ARCH_CSB335 CSB335 351
++ixrd425 ARCH_IXRD425 IXRD425 352
++iq80315 ARCH_IQ80315 IQ80315 353
++nmp7312 ARCH_NMP7312 NMP7312 354
++cx861xx ARCH_CX861XX CX861XX 355
+ enp2611 ARCH_ENP2611 ENP2611 356
++xda SA1100_XDA XDA 357
++csir_ims ARCH_CSIR_IMS CSIR_IMS 358
++ixp421_dnaeeth ARCH_IXP421_DNAEETH IXP421_DNAEETH 359
++pocketserv9200 ARCH_POCKETSERV9200 POCKETSERV9200 360
++toto ARCH_TOTO TOTO 361
+ s3c2440 ARCH_S3C2440 S3C2440 362
++ks8695p ARCH_KS8695P KS8695P 363
++se4000 ARCH_SE4000 SE4000 364
++quadriceps ARCH_QUADRICEPS QUADRICEPS 365
++bronco ARCH_BRONCO BRONCO 366
++esl_wireless_tab ARCH_ESL_WIRELESS_TAB ESL_WIRELESS_TAB 367
++esl_sofcomp ARCH_ESL_SOFCOMP ESL_SOFCOMP 368
++s5c7375 ARCH_S5C7375 S5C7375 369
++spearhead ARCH_SPEARHEAD SPEARHEAD 370
++pantera ARCH_PANTERA PANTERA 371
++prayoglite ARCH_PRAYOGLITE PRAYOGLITE 372
+ gumstix ARCH_GUMSTIX GUMSTIX 373
++rcube ARCH_RCUBE RCUBE 374
++rea_olv ARCH_REA_OLV REA_OLV 375
++pxa_iphone ARCH_PXA_IPHONE PXA_IPHONE 376
++s3c3410 ARCH_S3C3410 S3C3410 377
++espd_4510b ARCH_ESPD_4510B ESPD_4510B 378
++mp1x ARCH_MP1X MP1X 379
++at91rm9200tb ARCH_AT91RM9200TB AT91RM9200TB 380
++adsvgx ARCH_ADSVGX ADSVGX 381
+ omap_h2 MACH_OMAP_H2 OMAP_H2 382
++pelee ARCH_PELEE PELEE 383
+ e740 MACH_E740 E740 384
+ iq80331 ARCH_IQ80331 IQ80331 385
+ versatile_pb ARCH_VERSATILE_PB VERSATILE_PB 387
+ kev7a400 MACH_KEV7A400 KEV7A400 388
+ lpd7a400 MACH_LPD7A400 LPD7A400 389
+ lpd7a404 MACH_LPD7A404 LPD7A404 390
++fujitsu_camelot ARCH_FUJITSU_CAMELOT FUJITSU_CAMELOT 391
++janus2m ARCH_JANUS2M JANUS2M 392
++embtf MACH_EMBTF EMBTF 393
++hpm MACH_HPM HPM 394
++smdk2410tk MACH_SMDK2410TK SMDK2410TK 395
++smdk2410aj MACH_SMDK2410AJ SMDK2410AJ 396
++streetracer MACH_STREETRACER STREETRACER 397
++eframe MACH_EFRAME EFRAME 398
+ csb337 MACH_CSB337 CSB337 399
++pxa_lark MACH_PXA_LARK PXA_LARK 400
++pxa_pnp2110 MACH_PNP2110 PNP2110 401
++tcc72x MACH_TCC72X TCC72X 402
++altair MACH_ALTAIR ALTAIR 403
++kc3 MACH_KC3 KC3 404
++sinteftd MACH_SINTEFTD SINTEFTD 405
+ mainstone MACH_MAINSTONE MAINSTONE 406
++aday4x MACH_ADAY4X ADAY4X 407
++lite300 MACH_LITE300 LITE300 408
++s5c7376 MACH_S5C7376 S5C7376 409
++mt02 MACH_MT02 MT02 410
++mport3s MACH_MPORT3S MPORT3S 411
++ra_alpha MACH_RA_ALPHA RA_ALPHA 412
+ xcep MACH_XCEP XCEP 413
+ arcom_vulcan MACH_ARCOM_VULCAN ARCOM_VULCAN 414
++stargate MACH_STARGATE STARGATE 415
++armadilloj MACH_ARMADILLOJ ARMADILLOJ 416
++elroy_jack MACH_ELROY_JACK ELROY_JACK 417
++backend MACH_BACKEND BACKEND 418
++s5linbox MACH_S5LINBOX S5LINBOX 419
+ nomadik MACH_NOMADIK NOMADIK 420
++ia_cpu_9200 MACH_IA_CPU_9200 IA_CPU_9200 421
++at91_bja1 MACH_AT91_BJA1 AT91_BJA1 422
+ corgi MACH_CORGI CORGI 423
+ poodle MACH_POODLE POODLE 424
++ten MACH_TEN TEN 425
++roverp5p MACH_ROVERP5P ROVERP5P 426
++sc2700 MACH_SC2700 SC2700 427
++ex_eagle MACH_EX_EAGLE EX_EAGLE 428
++nx_pxa12 MACH_NX_PXA12 NX_PXA12 429
++nx_pxa5 MACH_NX_PXA5 NX_PXA5 430
++blackboard2 MACH_BLACKBOARD2 BLACKBOARD2 431
++i819 MACH_I819 I819 432
++ixmb995e MACH_IXMB995E IXMB995E 433
++skyrider MACH_SKYRIDER SKYRIDER 434
++skyhawk MACH_SKYHAWK SKYHAWK 435
++enterprise MACH_ENTERPRISE ENTERPRISE 436
++dep2410 MACH_DEP2410 DEP2410 437
+ armcore MACH_ARMCORE ARMCORE 438
++hobbit MACH_HOBBIT HOBBIT 439
++h7210 MACH_H7210 H7210 440
++pxa_netdcu5 MACH_PXA_NETDCU5 PXA_NETDCU5 441
++acc MACH_ACC ACC 442
++esl_sarva MACH_ESL_SARVA ESL_SARVA 443
++xm250 MACH_XM250 XM250 444
++t6tc1xb MACH_T6TC1XB T6TC1XB 445
++ess710 MACH_ESS710 ESS710 446
+ mx31ads MACH_MX31ADS MX31ADS 447
+ himalaya MACH_HIMALAYA HIMALAYA 448
++bolfenk MACH_BOLFENK BOLFENK 449
++at91rm9200kr MACH_AT91RM9200KR AT91RM9200KR 450
+ edb9312 MACH_EDB9312 EDB9312 451
+ omap_generic MACH_OMAP_GENERIC OMAP_GENERIC 452
++aximx3 MACH_AXIMX3 AXIMX3 453
++eb67xdip MACH_EB67XDIP EB67XDIP 454
++webtxs MACH_WEBTXS WEBTXS 455
++hawk MACH_HAWK HAWK 456
++ccat91sbc001 MACH_CCAT91SBC001 CCAT91SBC001 457
++expresso MACH_EXPRESSO EXPRESSO 458
++h4000 MACH_H4000 H4000 459
++dino MACH_DINO DINO 460
++ml675k MACH_ML675K ML675K 461
+ edb9301 MACH_EDB9301 EDB9301 462
+ edb9315 MACH_EDB9315 EDB9315 463
++reciva_tt MACH_RECIVA_TT RECIVA_TT 464
++cstcb01 MACH_CSTCB01 CSTCB01 465
++cstcb1 MACH_CSTCB1 CSTCB1 466
++shadwell MACH_SHADWELL SHADWELL 467
++goepel263 MACH_GOEPEL263 GOEPEL263 468
++acq100 MACH_ACQ100 ACQ100 469
++mx1fs2 MACH_MX1FS2 MX1FS2 470
++hiptop_g1 MACH_HIPTOP_G1 HIPTOP_G1 471
++sparky MACH_SPARKY SPARKY 472
++ns9750 MACH_NS9750 NS9750 473
++phoenix MACH_PHOENIX PHOENIX 474
+ vr1000 MACH_VR1000 VR1000 475
++deisterpxa MACH_DEISTERPXA DEISTERPXA 476
++bcm1160 MACH_BCM1160 BCM1160 477
++pcm022 MACH_PCM022 PCM022 478
++adsgcx MACH_ADSGCX ADSGCX 479
++dreadnaught MACH_DREADNAUGHT DREADNAUGHT 480
++dm320 MACH_DM320 DM320 481
++markov MACH_MARKOV MARKOV 482
++cos7a400 MACH_COS7A400 COS7A400 483
++milano MACH_MILANO MILANO 484
++ue9328 MACH_UE9328 UE9328 485
++uex255 MACH_UEX255 UEX255 486
++ue2410 MACH_UE2410 UE2410 487
++a620 MACH_A620 A620 488
++ocelot MACH_OCELOT OCELOT 489
++cheetah MACH_CHEETAH CHEETAH 490
+ omap_perseus2 MACH_OMAP_PERSEUS2 OMAP_PERSEUS2 491
++zvue MACH_ZVUE ZVUE 492
++roverp1 MACH_ROVERP1 ROVERP1 493
++asidial2 MACH_ASIDIAL2 ASIDIAL2 494
++s3c24a0 MACH_S3C24A0 S3C24A0 495
+ e800 MACH_E800 E800 496
+ e750 MACH_E750 E750 497
++s3c5500 MACH_S3C5500 S3C5500 498
++smdk5500 MACH_SMDK5500 SMDK5500 499
++signalsync MACH_SIGNALSYNC SIGNALSYNC 500
++nbc MACH_NBC NBC 501
++kodiak MACH_KODIAK KODIAK 502
++netbookpro MACH_NETBOOKPRO NETBOOKPRO 503
++hw90200 MACH_HW90200 HW90200 504
++condor MACH_CONDOR CONDOR 505
++cup MACH_CUP CUP 506
++kite MACH_KITE KITE 507
+ scb9328 MACH_SCB9328 SCB9328 508
+ omap_h3 MACH_OMAP_H3 OMAP_H3 509
+ omap_h4 MACH_OMAP_H4 OMAP_H4 510
++n10 MACH_N10 N10 511
++montejade MACH_MONTAJADE MONTAJADE 512
++sg560 MACH_SG560 SG560 513
++dp1000 MACH_DP1000 DP1000 514
+ omap_osk MACH_OMAP_OSK OMAP_OSK 515
++rg100v3 MACH_RG100V3 RG100V3 516
++mx2ads MACH_MX2ADS MX2ADS 517
++pxa_kilo MACH_PXA_KILO PXA_KILO 518
++ixp4xx_eagle MACH_IXP4XX_EAGLE IXP4XX_EAGLE 519
+ tosa MACH_TOSA TOSA 520
++mb2520f MACH_MB2520F MB2520F 521
++emc1000 MACH_EMC1000 EMC1000 522
++tidsc25 MACH_TIDSC25 TIDSC25 523
++akcpmxl MACH_AKCPMXL AKCPMXL 524
++av3xx MACH_AV3XX AV3XX 525
+ avila MACH_AVILA AVILA 526
++pxa_mpm10 MACH_PXA_MPM10 PXA_MPM10 527
++pxa_kyanite MACH_PXA_KYANITE PXA_KYANITE 528
++sgold MACH_SGOLD SGOLD 529
++oscar MACH_OSCAR OSCAR 530
++epxa4usb2 MACH_EPXA4USB2 EPXA4USB2 531
++xsengine MACH_XSENGINE XSENGINE 532
++ip600 MACH_IP600 IP600 533
++mcan2 MACH_MCAN2 MCAN2 534
++ddi_blueridge MACH_DDI_BLUERIDGE DDI_BLUERIDGE 535
++skyminder MACH_SKYMINDER SKYMINDER 536
++lpd79520 MACH_LPD79520 LPD79520 537
+ edb9302 MACH_EDB9302 EDB9302 538
++hw90340 MACH_HW90340 HW90340 539
++cip_box MACH_CIP_BOX CIP_BOX 540
++ivpn MACH_IVPN IVPN 541
++rsoc2 MACH_RSOC2 RSOC2 542
+ husky MACH_HUSKY HUSKY 543
++boxer MACH_BOXER BOXER 544
+ shepherd MACH_SHEPHERD SHEPHERD 545
++aml42800aa MACH_AML42800AA AML42800AA 546
++lpc2294 MACH_LPC2294 LPC2294 548
++switchgrass MACH_SWITCHGRASS SWITCHGRASS 549
++ens_cmu MACH_ENS_CMU ENS_CMU 550
++mm6_sdb MACH_MM6_SDB MM6_SDB 551
++saturn MACH_SATURN SATURN 552
++i30030evb MACH_I30030EVB I30030EVB 553
++mxc27530evb MACH_MXC27530EVB MXC27530EVB 554
++smdk2800 MACH_SMDK2800 SMDK2800 555
++mtwilson MACH_MTWILSON MTWILSON 556
++ziti MACH_ZITI ZITI 557
++grandfather MACH_GRANDFATHER GRANDFATHER 558
++tengine MACH_TENGINE TENGINE 559
++s3c2460 MACH_S3C2460 S3C2460 560
++pdm MACH_PDM PDM 561
+ h4700 MACH_H4700 H4700 562
++h6300 MACH_H6300 H6300 563
++rz1700 MACH_RZ1700 RZ1700 564
++a716 MACH_A716 A716 565
++estk2440a MACH_ESTK2440A ESTK2440A 566
++atwixp425 MACH_ATWIXP425 ATWIXP425 567
++csb336 MACH_CSB336 CSB336 568
++rirm2 MACH_RIRM2 RIRM2 569
++cx23518 MACH_CX23518 CX23518 570
++cx2351x MACH_CX2351X CX2351X 571
++computime MACH_COMPUTIME COMPUTIME 572
++izarus MACH_IZARUS IZARUS 573
++pxa_rts MACH_RTS RTS 574
++se5100 MACH_SE5100 SE5100 575
++s3c2510 MACH_S3C2510 S3C2510 576
++csb437tl MACH_CSB437TL CSB437TL 577
++slauson MACH_SLAUSON SLAUSON 578
++pearlriver MACH_PEARLRIVER PEARLRIVER 579
++tdc_p210 MACH_TDC_P210 TDC_P210 580
++sg580 MACH_SG580 SG580 581
++wrsbcarm7 MACH_WRSBCARM7 WRSBCARM7 582
++ipd MACH_IPD IPD 583
++pxa_dnp2110 MACH_PXA_DNP2110 PXA_DNP2110 584
++xaeniax MACH_XAENIAX XAENIAX 585
++somn4250 MACH_SOMN4250 SOMN4250 586
++pleb2 MACH_PLEB2 PLEB2 587
++cornwallis MACH_CORNWALLIS CORNWALLIS 588
++gurney_drv MACH_GURNEY_DRV GURNEY_DRV 589
++chaffee MACH_CHAFFEE CHAFFEE 590
++rms101 MACH_RMS101 RMS101 591
+ rx3715 MACH_RX3715 RX3715 592
++swift MACH_SWIFT SWIFT 593
++roverp7 MACH_ROVERP7 ROVERP7 594
++pr818s MACH_PR818S PR818S 595
++trxpro MACH_TRXPRO TRXPRO 596
+ nslu2 MACH_NSLU2 NSLU2 597
+ e400 MACH_E400 E400 598
++trab MACH_TRAB TRAB 599
++cmc_pu2 MACH_CMC_PU2 CMC_PU2 600
++fulcrum MACH_FULCRUM FULCRUM 601
++netgate42x MACH_NETGATE42X NETGATE42X 602
++str710 MACH_STR710 STR710 603
+ ixdpg425 MACH_IXDPG425 IXDPG425 604
++tomtomgo MACH_TOMTOMGO TOMTOMGO 605
+ versatile_ab MACH_VERSATILE_AB VERSATILE_AB 606
+ edb9307 MACH_EDB9307 EDB9307 607
++sg565 MACH_SG565 SG565 608
++lpd79524 MACH_LPD79524 LPD79524 609
++lpd79525 MACH_LPD79525 LPD79525 610
++rms100 MACH_RMS100 RMS100 611
+ kb9200 MACH_KB9200 KB9200 612
+ sx1 MACH_SX1 SX1 613
++hms39c7092 MACH_HMS39C7092 HMS39C7092 614
++armadillo MACH_ARMADILLO ARMADILLO 615
++ipcu MACH_IPCU IPCU 616
++loox720 MACH_LOOX720 LOOX720 617
+ ixdp465 MACH_IXDP465 IXDP465 618
+ ixdp2351 MACH_IXDP2351 IXDP2351 619
++adsvix MACH_ADSVIX ADSVIX 620
++dm270 MACH_DM270 DM270 621
++socltplus MACH_SOCLTPLUS SOCLTPLUS 622
++ecia MACH_ECIA ECIA 623
++cm4008 MACH_CM4008 CM4008 624
++p2001 MACH_P2001 P2001 625
++twister MACH_TWISTER TWISTER 626
++mudshark MACH_MUDSHARK MUDSHARK 627
++hb2 MACH_HB2 HB2 628
+ iq80332 MACH_IQ80332 IQ80332 629
++sendt MACH_SENDT SENDT 630
++mx2jazz MACH_MX2JAZZ MX2JAZZ 631
++multiio MACH_MULTIIO MULTIIO 632
++hrdisplay MACH_HRDISPLAY HRDISPLAY 633
++mxc27530ads MACH_MXC27530ADS MXC27530ADS 634
++trizeps3 MACH_TRIZEPS3 TRIZEPS3 635
++zefeerdza MACH_ZEFEERDZA ZEFEERDZA 636
++zefeerdzb MACH_ZEFEERDZB ZEFEERDZB 637
++zefeerdzg MACH_ZEFEERDZG ZEFEERDZG 638
++zefeerdzn MACH_ZEFEERDZN ZEFEERDZN 639
++zefeerdzq MACH_ZEFEERDZQ ZEFEERDZQ 640
+ gtwx5715 MACH_GTWX5715 GTWX5715 641
++astro_jack MACH_ASTRO_JACK ASTRO_JACK 643
++tip03 MACH_TIP03 TIP03 644
++a9200ec MACH_A9200EC A9200EC 645
++pnx0105 MACH_PNX0105 PNX0105 646
++adcpoecpu MACH_ADCPOECPU ADCPOECPU 647
+ csb637 MACH_CSB637 CSB637 648
++mb9200 MACH_MB9200 MB9200 650
++kulun MACH_KULUN KULUN 651
++snapper MACH_SNAPPER SNAPPER 652
++optima MACH_OPTIMA OPTIMA 653
++dlhsbc MACH_DLHSBC DLHSBC 654
++x30 MACH_X30 X30 655
+ n30 MACH_N30 N30 656
++manga_ks8695 MACH_MANGA_KS8695 MANGA_KS8695 657
++ajax MACH_AJAX AJAX 658
+ nec_mp900 MACH_NEC_MP900 NEC_MP900 659
++vvtk1000 MACH_VVTK1000 VVTK1000 661
+ kafa MACH_KAFA KAFA 662
++vvtk3000 MACH_VVTK3000 VVTK3000 663
++pimx1 MACH_PIMX1 PIMX1 664
++ollie MACH_OLLIE OLLIE 665
++skymax MACH_SKYMAX SKYMAX 666
++jazz MACH_JAZZ JAZZ 667
++tel_t3 MACH_TEL_T3 TEL_T3 668
++aisino_fcr255 MACH_AISINO_FCR255 AISINO_FCR255 669
++btweb MACH_BTWEB BTWEB 670
++dbg_lh79520 MACH_DBG_LH79520 DBG_LH79520 671
++cm41xx MACH_CM41XX CM41XX 672
+ ts72xx MACH_TS72XX TS72XX 673
++nggpxa MACH_NGGPXA NGGPXA 674
++csb535 MACH_CSB535 CSB535 675
++csb536 MACH_CSB536 CSB536 676
++pxa_trakpod MACH_PXA_TRAKPOD PXA_TRAKPOD 677
++praxis MACH_PRAXIS PRAXIS 678
++lh75411 MACH_LH75411 LH75411 679
+ otom MACH_OTOM OTOM 680
+ nexcoder_2440 MACH_NEXCODER_2440 NEXCODER_2440 681
++loox410 MACH_LOOX410 LOOX410 682
++westlake MACH_WESTLAKE WESTLAKE 683
++nsb MACH_NSB NSB 684
++esl_sarva_stn MACH_ESL_SARVA_STN ESL_SARVA_STN 685
++esl_sarva_tft MACH_ESL_SARVA_TFT ESL_SARVA_TFT 686
++esl_sarva_iad MACH_ESL_SARVA_IAD ESL_SARVA_IAD 687
++esl_sarva_acc MACH_ESL_SARVA_ACC ESL_SARVA_ACC 688
++typhoon MACH_TYPHOON TYPHOON 689
++cnav MACH_CNAV CNAV 690
++a730 MACH_A730 A730 691
++netstar MACH_NETSTAR NETSTAR 692
++supercon MACH_PHASEFALE_SUPERCON PHASEFALE_SUPERCON 693
++shiva1100 MACH_SHIVA1100 SHIVA1100 694
++etexsc MACH_ETEXSC ETEXSC 695
++ixdpg465 MACH_IXDPG465 IXDPG465 696
++a9m2410 MACH_A9M2410 A9M2410 697
++a9m2440 MACH_A9M2440 A9M2440 698
++a9m9750 MACH_A9M9750 A9M9750 699
++a9m9360 MACH_A9M9360 A9M9360 700
++unc90 MACH_UNC90 UNC90 701
+ eco920 MACH_ECO920 ECO920 702
++satview MACH_SATVIEW SATVIEW 703
+ roadrunner MACH_ROADRUNNER ROADRUNNER 704
+ at91rm9200ek MACH_AT91RM9200EK AT91RM9200EK 705
++gp32 MACH_GP32 GP32 706
++gem MACH_GEM GEM 707
++i858 MACH_I858 I858 708
++hx2750 MACH_HX2750 HX2750 709
++mxc91131evb MACH_MXC91131EVB MXC91131EVB 710
++p700 MACH_P700 P700 711
++cpe MACH_CPE CPE 712
+ spitz MACH_SPITZ SPITZ 713
++nimbra340 MACH_NIMBRA340 NIMBRA340 714
++lpc22xx MACH_LPC22XX LPC22XX 715
++omap_comet3 MACH_COMET3 COMET3 716
++omap_comet4 MACH_COMET4 COMET4 717
++csb625 MACH_CSB625 CSB625 718
++fortunet2 MACH_FORTUNET2 FORTUNET2 719
++s5h2200 MACH_S5H2200 S5H2200 720
++optorm920 MACH_OPTORM920 OPTORM920 721
++adsbitsyxb MACH_ADSBITSYXB ADSBITSYXB 722
+ adssphere MACH_ADSSPHERE ADSSPHERE 723
++adsportal MACH_ADSPORTAL ADSPORTAL 724
++ln2410sbc MACH_LN2410SBC LN2410SBC 725
++cb3rufc MACH_CB3RUFC CB3RUFC 726
++mp2usb MACH_MP2USB MP2USB 727
++ntnp425c MACH_NTNP425C NTNP425C 728
+ colibri MACH_COLIBRI COLIBRI 729
++pcm7220 MACH_PCM7220 PCM7220 730
+ gateway7001 MACH_GATEWAY7001 GATEWAY7001 731
+ pcm027 MACH_PCM027 PCM027 732
++cmpxa MACH_CMPXA CMPXA 733
+ anubis MACH_ANUBIS ANUBIS 734
++ite8152 MACH_ITE8152 ITE8152 735
++lpc3xxx MACH_LPC3XXX LPC3XXX 736
++puppeteer MACH_PUPPETEER PUPPETEER 737
++e570 MACH_E570 E570 739
++x50 MACH_X50 X50 740
++recon MACH_RECON RECON 741
++xboardgp8 MACH_XBOARDGP8 XBOARDGP8 742
++fpic2 MACH_FPIC2 FPIC2 743
+ akita MACH_AKITA AKITA 744
++a81 MACH_A81 A81 745
++svm_sc25x MACH_SVM_SC25X SVM_SC25X 746
++vt020 MACH_VADATECH020 VADATECH020 747
++tli MACH_TLI TLI 748
++edb9315lc MACH_EDB9315LC EDB9315LC 749
++passec MACH_PASSEC PASSEC 750
++ds_tiger MACH_DS_TIGER DS_TIGER 751
++e310 MACH_E310 E310 752
+ e330 MACH_E330 E330 753
++rt3000 MACH_RT3000 RT3000 754
+ nokia770 MACH_NOKIA770 NOKIA770 755
++pnx0106 MACH_PNX0106 PNX0106 756
++hx21xx MACH_HX21XX HX21XX 757
++faraday MACH_FARADAY FARADAY 758
++sbc9312 MACH_SBC9312 SBC9312 759
++batman MACH_BATMAN BATMAN 760
++jpd201 MACH_JPD201 JPD201 761
++mipsa MACH_MIPSA MIPSA 762
++kacom MACH_KACOM KACOM 763
++swarcocpu MACH_SWARCOCPU SWARCOCPU 764
++swarcodsl MACH_SWARCODSL SWARCODSL 765
++blueangel MACH_BLUEANGEL BLUEANGEL 766
++hairygrama MACH_HAIRYGRAMA HAIRYGRAMA 767
++banff MACH_BANFF BANFF 768
+ carmeva MACH_CARMEVA CARMEVA 769
++sam255 MACH_SAM255 SAM255 770
++ppm10 MACH_PPM10 PPM10 771
+ edb9315a MACH_EDB9315A EDB9315A 772
++sunset MACH_SUNSET SUNSET 773
+ stargate2 MACH_STARGATE2 STARGATE2 774
+ intelmote2 MACH_INTELMOTE2 INTELMOTE2 775
+ trizeps4 MACH_TRIZEPS4 TRIZEPS4 776
++mainstone2 MACH_MAINSTONE2 MAINSTONE2 777
++ez_ixp42x MACH_EZ_IXP42X EZ_IXP42X 778
++tapwave_zodiac MACH_TAPWAVE_ZODIAC TAPWAVE_ZODIAC 779
++universalmeter MACH_UNIVERSALMETER UNIVERSALMETER 780
++hicoarm9 MACH_HICOARM9 HICOARM9 781
+ pnx4008 MACH_PNX4008 PNX4008 782
++kws6000 MACH_KWS6000 KWS6000 783
++portux920t MACH_PORTUX920T PORTUX920T 784
++ez_x5 MACH_EZ_X5 EZ_X5 785
++omap_rudolph MACH_OMAP_RUDOLPH OMAP_RUDOLPH 786
+ cpuat91 MACH_CPUAT91 CPUAT91 787
++rea9200 MACH_REA9200 REA9200 788
++acts_pune_sa1110 MACH_ACTS_PUNE_SA1110 ACTS_PUNE_SA1110 789
++ixp425 MACH_IXP425 IXP425 790
++i30030ads MACH_I30030ADS I30030ADS 791
++perch MACH_PERCH PERCH 792
++eis05r1 MACH_EIS05R1 EIS05R1 793
++pepperpad MACH_PEPPERPAD PEPPERPAD 794
++sb3010 MACH_SB3010 SB3010 795
++rm9200 MACH_RM9200 RM9200 796
++dma03 MACH_DMA03 DMA03 797
++road_s101 MACH_ROAD_S101 ROAD_S101 798
+ iq81340sc MACH_IQ81340SC IQ81340SC 799
++iq_nextgen_b MACH_IQ_NEXTGEN_B IQ_NEXTGEN_B 800
+ iq81340mc MACH_IQ81340MC IQ81340MC 801
++iq_nextgen_d MACH_IQ_NEXTGEN_D IQ_NEXTGEN_D 802
++iq_nextgen_e MACH_IQ_NEXTGEN_E IQ_NEXTGEN_E 803
++mallow_at91 MACH_MALLOW_AT91 MALLOW_AT91 804
++cybertracker_i MACH_CYBERTRACKER_I CYBERTRACKER_I 805
++gesbc931x MACH_GESBC931X GESBC931X 806
++centipad MACH_CENTIPAD CENTIPAD 807
++armsoc MACH_ARMSOC ARMSOC 808
++se4200 MACH_SE4200 SE4200 809
++ems197a MACH_EMS197A EMS197A 810
+ micro9 MACH_MICRO9 MICRO9 811
+ micro9l MACH_MICRO9L MICRO9L 812
++uc5471dsp MACH_UC5471DSP UC5471DSP 813
++sj5471eng MACH_SJ5471ENG SJ5471ENG 814
++none MACH_CMPXA26X CMPXA26X 815
++nc1 MACH_NC NC 816
+ omap_palmte MACH_OMAP_PALMTE OMAP_PALMTE 817
++ajax52x MACH_AJAX52X AJAX52X 818
++siriustar MACH_SIRIUSTAR SIRIUSTAR 819
++iodata_hdlg MACH_IODATA_HDLG IODATA_HDLG 820
++at91rm9200utl MACH_AT91RM9200UTL AT91RM9200UTL 821
++biosafe MACH_BIOSAFE BIOSAFE 822
++mp1000 MACH_MP1000 MP1000 823
++parsy MACH_PARSY PARSY 824
++ccxp270 MACH_CCXP CCXP 825
++omap_gsample MACH_OMAP_GSAMPLE OMAP_GSAMPLE 826
+ realview_eb MACH_REALVIEW_EB REALVIEW_EB 827
++samoa MACH_SAMOA SAMOA 828
++palmt3 MACH_PALMT3 PALMT3 829
++i878 MACH_I878 I878 830
+ borzoi MACH_BORZOI BORZOI 831
++gecko MACH_GECKO GECKO 832
++ds101 MACH_DS101 DS101 833
++omap_palmtt2 MACH_OMAP_PALMTT2 OMAP_PALMTT2 834
+ palmld MACH_PALMLD PALMLD 835
++cc9c MACH_CC9C CC9C 836
++sbc1670 MACH_SBC1670 SBC1670 837
+ ixdp28x5 MACH_IXDP28X5 IXDP28X5 838
+ omap_palmtt MACH_OMAP_PALMTT OMAP_PALMTT 839
++ml696k MACH_ML696K ML696K 840
+ arcom_zeus MACH_ARCOM_ZEUS ARCOM_ZEUS 841
+ osiris MACH_OSIRIS OSIRIS 842
++maestro MACH_MAESTRO MAESTRO 843
+ palmte2 MACH_PALMTE2 PALMTE2 844
++ixbbm MACH_IXBBM IXBBM 845
+ mx27ads MACH_MX27ADS MX27ADS 846
++ax8004 MACH_AX8004 AX8004 847
+ at91sam9261ek MACH_AT91SAM9261EK AT91SAM9261EK 848
+ loft MACH_LOFT LOFT 849
++magpie MACH_MAGPIE MAGPIE 850
+ mx21ads MACH_MX21ADS MX21ADS 851
++mb87m3400 MACH_MB87M3400 MB87M3400 852
++mguard_delta MACH_MGUARD_DELTA MGUARD_DELTA 853
++davinci_dvdp MACH_DAVINCI_DVDP DAVINCI_DVDP 854
++htcuniversal MACH_HTCUNIVERSAL HTCUNIVERSAL 855
++tpad MACH_TPAD TPAD 856
++roverp3 MACH_ROVERP3 ROVERP3 857
++jornada928 MACH_JORNADA928 JORNADA928 858
++mv88fxx81 MACH_MV88FXX81 MV88FXX81 859
++stmp36xx MACH_STMP36XX STMP36XX 860
++sxni79524 MACH_SXNI79524 SXNI79524 861
+ ams_delta MACH_AMS_DELTA AMS_DELTA 862
++uranium MACH_URANIUM URANIUM 863
++ucon MACH_UCON UCON 864
+ nas100d MACH_NAS100D NAS100D 865
++l083 MACH_L083_1000 L083_1000 866
++ezx MACH_EZX EZX 867
++pnx5220 MACH_PNX5220 PNX5220 868
++butte MACH_BUTTE BUTTE 869
++srm2 MACH_SRM2 SRM2 870
++dsbr MACH_DSBR DSBR 871
++crystalball MACH_CRYSTALBALL CRYSTALBALL 872
++tinypxa27x MACH_TINYPXA27X TINYPXA27X 873
++herbie MACH_HERBIE HERBIE 874
+ magician MACH_MAGICIAN MAGICIAN 875
++cm4002 MACH_CM4002 CM4002 876
++b4 MACH_B4 B4 877
++maui MACH_MAUI MAUI 878
++cybertracker_g MACH_CYBERTRACKER_G CYBERTRACKER_G 879
+ nxdkn MACH_NXDKN NXDKN 880
++mio8390 MACH_MIO8390 MIO8390 881
++omi_board MACH_OMI_BOARD OMI_BOARD 882
++mx21civ MACH_MX21CIV MX21CIV 883
++mahi_cdac MACH_MAHI_CDAC MAHI_CDAC 884
+ palmtx MACH_PALMTX PALMTX 885
+ s3c2413 MACH_S3C2413 S3C2413 887
++samsys_ep0 MACH_SAMSYS_EP0 SAMSYS_EP0 888
++wg302v1 MACH_WG302V1 WG302V1 889
+ wg302v2 MACH_WG302V2 WG302V2 890
++eb42x MACH_EB42X EB42X 891
++iq331es MACH_IQ331ES IQ331ES 892
++cosydsp MACH_COSYDSP COSYDSP 893
++uplat7d_proto MACH_UPLAT7D UPLAT7D 894
++ptdavinci MACH_PTDAVINCI PTDAVINCI 895
++mbus MACH_MBUS MBUS 896
++nadia2vb MACH_NADIA2VB NADIA2VB 897
++r1000 MACH_R1000 R1000 898
++hw90250 MACH_HW90250 HW90250 899
+ omap_2430sdp MACH_OMAP_2430SDP OMAP_2430SDP 900
+ davinci_evm MACH_DAVINCI_EVM DAVINCI_EVM 901
++omap_tornado MACH_OMAP_TORNADO OMAP_TORNADO 902
++olocreek MACH_OLOCREEK OLOCREEK 903
+ palmz72 MACH_PALMZ72 PALMZ72 904
+ nxdb500 MACH_NXDB500 NXDB500 905
+ apf9328 MACH_APF9328 APF9328 906
++omap_wipoq MACH_OMAP_WIPOQ OMAP_WIPOQ 907
++omap_twip MACH_OMAP_TWIP OMAP_TWIP 908
++treo650 MACH_TREO650 TREO650 909
++acumen MACH_ACUMEN ACUMEN 910
++xp100 MACH_XP100 XP100 911
++fs2410 MACH_FS2410 FS2410 912
++pxa270_cerf MACH_PXA270_CERF PXA270_CERF 913
++sq2ftlpalm MACH_SQ2FTLPALM SQ2FTLPALM 914
++bsemserver MACH_BSEMSERVER BSEMSERVER 915
++netclient MACH_NETCLIENT NETCLIENT 916
+ palmt5 MACH_PALMT5 PALMT5 917
+ palmtc MACH_PALMTC PALMTC 918
+ omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919
++mxc30030evb MACH_MXC30030EVB MXC30030EVB 920
++rea_cpu2 MACH_REA_2D REA_2D 921
++eti3e524 MACH_TI3E524 TI3E524 922
+ ateb9200 MACH_ATEB9200 ATEB9200 923
++auckland MACH_AUCKLAND AUCKLAND 924
++ak3220m MACH_AK3320M AK3320M 925
++duramax MACH_DURAMAX DURAMAX 926
+ n35 MACH_N35 N35 927
++pronghorn MACH_PRONGHORN PRONGHORN 928
++fundy MACH_FUNDY FUNDY 929
+ logicpd_pxa270 MACH_LOGICPD_PXA270 LOGICPD_PXA270 930
++cpu777 MACH_CPU777 CPU777 931
++simicon9201 MACH_SIMICON9201 SIMICON9201 932
++leap2_hpm MACH_LEAP2_HPM LEAP2_HPM 933
++cm922txa10 MACH_CM922TXA10 CM922TXA10 934
++sandgate MACH_PXA PXA 935
++sandgate2 MACH_SANDGATE2 SANDGATE2 936
++sandgate2g MACH_SANDGATE2G SANDGATE2G 937
++sandgate2p MACH_SANDGATE2P SANDGATE2P 938
++fred_jack MACH_FRED_JACK FRED_JACK 939
++ttg_color1 MACH_TTG_COLOR1 TTG_COLOR1 940
+ nxeb500hmi MACH_NXEB500HMI NXEB500HMI 941
++netdcu8 MACH_NETDCU8 NETDCU8 942
++ng_fvx538 MACH_NG_FVX538 NG_FVX538 944
++ng_fvs338 MACH_NG_FVS338 NG_FVS338 945
++pnx4103 MACH_PNX4103 PNX4103 946
++hesdb MACH_HESDB HESDB 947
++xsilo MACH_XSILO XSILO 948
+ espresso MACH_ESPRESSO ESPRESSO 949
++emlc MACH_EMLC EMLC 950
++sisteron MACH_SISTERON SISTERON 951
+ rx1950 MACH_RX1950 RX1950 952
++tsc_venus MACH_TSC_VENUS TSC_VENUS 953
++ds101j MACH_DS101J DS101J 954
++mxc30030ads MACH_MXC30030ADS MXC30030ADS 955
++fujitsu_wimaxsoc MACH_FUJITSU_WIMAXSOC FUJITSU_WIMAXSOC 956
++dualpcmodem MACH_DUALPCMODEM DUALPCMODEM 957
+ gesbc9312 MACH_GESBC9312 GESBC9312 958
++htcapache MACH_HTCAPACHE HTCAPACHE 959
++ixdp435 MACH_IXDP435 IXDP435 960
++catprovt100 MACH_CATPROVT100 CATPROVT100 961
++picotux1xx MACH_PICOTUX1XX PICOTUX1XX 962
+ picotux2xx MACH_PICOTUX2XX PICOTUX2XX 963
+ dsmg600 MACH_DSMG600 DSMG600 964
++empc2 MACH_EMPC2 EMPC2 965
++ventura MACH_VENTURA VENTURA 966
++phidget_sbc MACH_PHIDGET_SBC PHIDGET_SBC 967
++ij3k MACH_IJ3K IJ3K 968
++pisgah MACH_PISGAH PISGAH 969
+ omap_fsample MACH_OMAP_FSAMPLE OMAP_FSAMPLE 970
++sg720 MACH_SG720 SG720 971
++redfox MACH_REDFOX REDFOX 972
++mysh_ep9315_1 MACH_MYSH_EP9315_1 MYSH_EP9315_1 973
++tpf106 MACH_TPF106 TPF106 974
++at91rm9200kg MACH_AT91RM9200KG AT91RM9200KG 975
++rcmt2 MACH_SLEDB SLEDB 976
++ontrack MACH_ONTRACK ONTRACK 977
++pm1200 MACH_PM1200 PM1200 978
++ess24562 MACH_ESS24XXX ESS24XXX 979
++coremp7 MACH_COREMP7 COREMP7 980
++nexcoder_6446 MACH_NEXCODER_6446 NEXCODER_6446 981
++stvc8380 MACH_STVC8380 STVC8380 982
++teklynx MACH_TEKLYNX TEKLYNX 983
++carbonado MACH_CARBONADO CARBONADO 984
++sysmos_mp730 MACH_SYSMOS_MP730 SYSMOS_MP730 985
+ snapper_cl15 MACH_SNAPPER_CL15 SNAPPER_CL15 986
++pgigim MACH_PGIGIM PGIGIM 987
++ptx9160p2 MACH_PTX9160P2 PTX9160P2 988
++dcore1 MACH_DCORE1 DCORE1 989
++victorpxa MACH_VICTORPXA VICTORPXA 990
++mx2dtb MACH_MX2DTB MX2DTB 991
++pxa_irex_er0100 MACH_PXA_IREX_ER0100 PXA_IREX_ER0100 992
+ omap_palmz71 MACH_OMAP_PALMZ71 OMAP_PALMZ71 993
++bartec_deg MACH_BARTEC_DEG BARTEC_DEG 994
++hw50251 MACH_HW50251 HW50251 995
++ibox MACH_IBOX IBOX 996
++atlaslh7a404 MACH_ATLASLH7A404 ATLASLH7A404 997
++pt2026 MACH_PT2026 PT2026 998
++htcalpine MACH_HTCALPINE HTCALPINE 999
++bartec_vtu MACH_BARTEC_VTU BARTEC_VTU 1000
++vcoreii MACH_VCOREII VCOREII 1001
++pdnb3 MACH_PDNB3 PDNB3 1002
++htcbeetles MACH_HTCBEETLES HTCBEETLES 1003
++s3c6400 MACH_S3C6400 S3C6400 1004
++s3c2443 MACH_S3C2443 S3C2443 1005
++omap_ldk MACH_OMAP_LDK OMAP_LDK 1006
++smdk2460 MACH_SMDK2460 SMDK2460 1007
++smdk2440 MACH_SMDK2440 SMDK2440 1008
+ smdk2412 MACH_SMDK2412 SMDK2412 1009
++webbox MACH_WEBBOX WEBBOX 1010
++cwwndp MACH_CWWNDP CWWNDP 1011
++i839 MACH_DRAGON DRAGON 1012
++opendo_cpu_board MACH_OPENDO_CPU_BOARD OPENDO_CPU_BOARD 1013
++ccm2200 MACH_CCM2200 CCM2200 1014
++etwarm MACH_ETWARM ETWARM 1015
++m93030 MACH_M93030 M93030 1016
++cc7u MACH_CC7U CC7U 1017
++mtt_ranger MACH_MTT_RANGER MTT_RANGER 1018
++nexus MACH_NEXUS NEXUS 1019
++desman MACH_DESMAN DESMAN 1020
++bkde303 MACH_BKDE303 BKDE303 1021
+ smdk2413 MACH_SMDK2413 SMDK2413 1022
++aml_m7200 MACH_AML_M7200 AML_M7200 1023
+ aml_m5900 MACH_AML_M5900 AML_M5900 1024
++sg640 MACH_SG640 SG640 1025
++edg79524 MACH_EDG79524 EDG79524 1026
++ai2410 MACH_AI2410 AI2410 1027
++ixp465 MACH_IXP465 IXP465 1028
+ balloon3 MACH_BALLOON3 BALLOON3 1029
++heins MACH_HEINS HEINS 1030
++mpluseva MACH_MPLUSEVA MPLUSEVA 1031
++rt042 MACH_RT042 RT042 1032
++cwiem MACH_CWIEM CWIEM 1033
++cm_x270 MACH_CM_X270 CM_X270 1034
++cm_x255 MACH_CM_X255 CM_X255 1035
++esh_at91 MACH_ESH_AT91 ESH_AT91 1036
++sandgate3 MACH_SANDGATE3 SANDGATE3 1037
++primo MACH_PRIMO PRIMO 1038
++gemstone MACH_GEMSTONE GEMSTONE 1039
++pronghorn_metro MACH_PRONGHORNMETRO PRONGHORNMETRO 1040
++sidewinder MACH_SIDEWINDER SIDEWINDER 1041
++picomod1 MACH_PICOMOD1 PICOMOD1 1042
++sg590 MACH_SG590 SG590 1043
++akai9307 MACH_AKAI9307 AKAI9307 1044
++fontaine MACH_FONTAINE FONTAINE 1045
++wombat MACH_WOMBAT WOMBAT 1046
++acq300 MACH_ACQ300 ACQ300 1047
++mod272 MACH_MOD_270 MOD_270 1048
++vmc_vc0820 MACH_VC0820 VC0820 1049
++ani_aim MACH_ANI_AIM ANI_AIM 1050
++jellyfish MACH_JELLYFISH JELLYFISH 1051
++amanita MACH_AMANITA AMANITA 1052
++vlink MACH_VLINK VLINK 1053
++dexflex MACH_DEXFLEX DEXFLEX 1054
++eigen_ttq MACH_EIGEN_TTQ EIGEN_TTQ 1055
++arcom_titan MACH_ARCOM_TITAN ARCOM_TITAN 1056
++tabla MACH_TABLA TABLA 1057
++mdirac3 MACH_MDIRAC3 MDIRAC3 1058
++mrhfbp2 MACH_MRHFBP2 MRHFBP2 1059
++at91rm9200rb MACH_AT91RM9200RB AT91RM9200RB 1060
++ani_apm MACH_ANI_APM ANI_APM 1061
++ella1 MACH_ELLA1 ELLA1 1062
++inhand_pxa27x MACH_INHAND_PXA27X INHAND_PXA27X 1063
++inhand_pxa25x MACH_INHAND_PXA25X INHAND_PXA25X 1064
++empos_xm MACH_EMPOS_XM EMPOS_XM 1065
++empos MACH_EMPOS EMPOS 1066
++empos_tiny MACH_EMPOS_TINY EMPOS_TINY 1067
++empos_sm MACH_EMPOS_SM EMPOS_SM 1068
++egret MACH_EGRET EGRET 1069
++ostrich MACH_OSTRICH OSTRICH 1070
++n50 MACH_N50 N50 1071
+ ecbat91 MACH_ECBAT91 ECBAT91 1072
++stareast MACH_STAREAST STAREAST 1073
++dspg_dw MACH_DSPG_DW DSPG_DW 1074
+ onearm MACH_ONEARM ONEARM 1075
++mrg110_6 MACH_MRG110_6 MRG110_6 1076
++wrt300nv2 MACH_WRT300NV2 WRT300NV2 1077
++xm_bulverde MACH_XM_BULVERDE XM_BULVERDE 1078
++msm6100 MACH_MSM6100 MSM6100 1079
++eti_b1 MACH_ETI_B1 ETI_B1 1080
++za9l_series MACH_ZILOG_ZA9L ZILOG_ZA9L 1081
++bit2440 MACH_BIT2440 BIT2440 1082
++nbi MACH_NBI NBI 1083
+ smdk2443 MACH_SMDK2443 SMDK2443 1084
++vdavinci MACH_VDAVINCI VDAVINCI 1085
++atc6 MACH_ATC6 ATC6 1086
++multmdw MACH_MULTMDW MULTMDW 1087
++mba2440 MACH_MBA2440 MBA2440 1088
++ecsd MACH_ECSD ECSD 1089
++palmz31 MACH_PALMZ31 PALMZ31 1090
+ fsg MACH_FSG FSG 1091
++razor101 MACH_RAZOR101 RAZOR101 1092
++opera_tdm MACH_OPERA_TDM OPERA_TDM 1093
++comcerto MACH_COMCERTO COMCERTO 1094
++tb0319 MACH_TB0319 TB0319 1095
++kws8000 MACH_KWS8000 KWS8000 1096
++b2 MACH_B2 B2 1097
++lcl54 MACH_LCL54 LCL54 1098
+ at91sam9260ek MACH_AT91SAM9260EK AT91SAM9260EK 1099
+ glantank MACH_GLANTANK GLANTANK 1100
+ n2100 MACH_N2100 N2100 1101
++n4100 MACH_N4100 N4100 1102
++rsc4 MACH_VERTICAL_RSC4 VERTICAL_RSC4 1103
++sg8100 MACH_SG8100 SG8100 1104
++im42xx MACH_IM42XX IM42XX 1105
++ftxx MACH_FTXX FTXX 1106
++lwfusion MACH_LWFUSION LWFUSION 1107
+ qt2410 MACH_QT2410 QT2410 1108
+ kixrp435 MACH_KIXRP435 KIXRP435 1109
++ccw9c MACH_CCW9C CCW9C 1110
++dabhs MACH_DABHS DABHS 1111
++gzmx MACH_GZMX GZMX 1112
++ipnw100ap MACH_IPNW100AP IPNW100AP 1113
+ cc9p9360dev MACH_CC9P9360DEV CC9P9360DEV 1114
++cc9p9750dev MACH_CC9P9750DEV CC9P9750DEV 1115
++cc9p9360val MACH_CC9P9360VAL CC9P9360VAL 1116
++cc9p9750val MACH_CC9P9750VAL CC9P9750VAL 1117
++nx70v MACH_NX70V NX70V 1118
++at91rm9200df MACH_AT91RM9200DF AT91RM9200DF 1119
++se_pilot2 MACH_SE_PILOT2 SE_PILOT2 1120
++mtcn_t800 MACH_MTCN_T800 MTCN_T800 1121
++vcmx212 MACH_VCMX212 VCMX212 1122
++lynx MACH_LYNX LYNX 1123
++at91sam9260id MACH_AT91SAM9260ID AT91SAM9260ID 1124
++hw86052 MACH_HW86052 HW86052 1125
++pilz_pmi3 MACH_PILZ_PMI3 PILZ_PMI3 1126
+ edb9302a MACH_EDB9302A EDB9302A 1127
+ edb9307a MACH_EDB9307A EDB9307A 1128
++ct_dfs MACH_CT_DFS CT_DFS 1129
++pilz_pmi4 MACH_PILZ_PMI4 PILZ_PMI4 1130
++xceednp_ixp MACH_XCEEDNP_IXP XCEEDNP_IXP 1131
++smdk2442b MACH_SMDK2442B SMDK2442B 1132
++xnode MACH_XNODE XNODE 1133
++aidx270 MACH_AIDX270 AIDX270 1134
++rema MACH_REMA REMA 1135
++bps1000 MACH_BPS1000 BPS1000 1136
++hw90350 MACH_HW90350 HW90350 1137
+ omap_3430sdp MACH_OMAP_3430SDP OMAP_3430SDP 1138
++bluetouch MACH_BLUETOUCH BLUETOUCH 1139
+ vstms MACH_VSTMS VSTMS 1140
++xsbase270 MACH_XSBASE270 XSBASE270 1141
++at91sam9260ek_cn MACH_AT91SAM9260EK_CN AT91SAM9260EK_CN 1142
++adsturboxb MACH_ADSTURBOXB ADSTURBOXB 1143
++oti4110 MACH_OTI4110 OTI4110 1144
++hme_pxa MACH_HME_PXA HME_PXA 1145
++deisterdca MACH_DEISTERDCA DEISTERDCA 1146
++ces_ssem2 MACH_CES_SSEM2 CES_SSEM2 1147
++ces_mtr MACH_CES_MTR CES_MTR 1148
++tds_avng_sbc MACH_TDS_AVNG_SBC TDS_AVNG_SBC 1149
++everest MACH_EVEREST EVEREST 1150
++pnx4010 MACH_PNX4010 PNX4010 1151
++oxnas MACH_OXNAS OXNAS 1152
++fiori MACH_FIORI FIORI 1153
++ml1200 MACH_ML1200 ML1200 1154
++pecos MACH_PECOS PECOS 1155
++nb2xxx MACH_NB2XXX NB2XXX 1156
++hw6900 MACH_HW6900 HW6900 1157
++cdcs_quoll MACH_CDCS_QUOLL CDCS_QUOLL 1158
++quicksilver MACH_QUICKSILVER QUICKSILVER 1159
++uplat926 MACH_UPLAT926 UPLAT926 1160
++dep2410_dep2410 MACH_DEP2410_THOMAS DEP2410_THOMAS 1161
++dtk2410 MACH_DTK2410 DTK2410 1162
++chili MACH_CHILI CHILI 1163
++demeter MACH_DEMETER DEMETER 1164
++dionysus MACH_DIONYSUS DIONYSUS 1165
++as352x MACH_AS352X AS352X 1166
++service MACH_SERVICE SERVICE 1167
++cs_e9301 MACH_CS_E9301 CS_E9301 1168
+ micro9m MACH_MICRO9M MICRO9M 1169
++ia_mospck MACH_IA_MOSPCK IA_MOSPCK 1170
++ql201b MACH_QL201B QL201B 1171
++bbm MACH_BBM BBM 1174
++exxx MACH_EXXX EXXX 1175
++wma11b MACH_WMA11B WMA11B 1176
++pelco_atlas MACH_PELCO_ATLAS PELCO_ATLAS 1177
++g500 MACH_G500 G500 1178
+ bug MACH_BUG BUG 1179
++mx33ads MACH_MX33ADS MX33ADS 1180
++chub MACH_CHUB CHUB 1181
++neo1973_gta01 MACH_NEO1973_GTA01 NEO1973_GTA01 1182
++w90n740 MACH_W90N740 W90N740 1183
++medallion_sa2410 MACH_MEDALLION_SA2410 MEDALLION_SA2410 1184
++ia_cpu_9200_2 MACH_IA_CPU_9200_2 IA_CPU_9200_2 1185
++dimmrm9200 MACH_DIMMRM9200 DIMMRM9200 1186
++pm9261 MACH_PM9261 PM9261 1187
++ml7304 MACH_ML7304 ML7304 1189
++ucp250 MACH_UCP250 UCP250 1190
++intboard MACH_INTBOARD INTBOARD 1191
++gulfstream MACH_GULFSTREAM GULFSTREAM 1192
++labquest MACH_LABQUEST LABQUEST 1193
++vcmx313 MACH_VCMX313 VCMX313 1194
++urg200 MACH_URG200 URG200 1195
++cpux255lcdnet MACH_CPUX255LCDNET CPUX255LCDNET 1196
++netdcu9 MACH_NETDCU9 NETDCU9 1197
++netdcu10 MACH_NETDCU10 NETDCU10 1198
++dspg_dga MACH_DSPG_DGA DSPG_DGA 1199
++dspg_dvw MACH_DSPG_DVW DSPG_DVW 1200
++solos MACH_SOLOS SOLOS 1201
+ at91sam9263ek MACH_AT91SAM9263EK AT91SAM9263EK 1202
++osstbox MACH_OSSTBOX OSSTBOX 1203
++kbat9261 MACH_KBAT9261 KBAT9261 1204
++ct1100 MACH_CT1100 CT1100 1205
++akcppxa MACH_AKCPPXA AKCPPXA 1206
++ochaya1020 MACH_OCHAYA1020 OCHAYA1020 1207
++hitrack MACH_HITRACK HITRACK 1208
++syme1 MACH_SYME1 SYME1 1209
++syhl1 MACH_SYHL1 SYHL1 1210
++empca400 MACH_EMPCA400 EMPCA400 1211
+ em7210 MACH_EM7210 EM7210 1212
++htchermes MACH_HTCHERMES HTCHERMES 1213
++eti_c1 MACH_ETI_C1 ETI_C1 1214
++ac100 MACH_AC100 AC100 1216
++sneetch MACH_SNEETCH SNEETCH 1217
++studentmate MACH_STUDENTMATE STUDENTMATE 1218
++zir2410 MACH_ZIR2410 ZIR2410 1219
++zir2413 MACH_ZIR2413 ZIR2413 1220
++dlonip3 MACH_DLONIP3 DLONIP3 1221
++instream MACH_INSTREAM INSTREAM 1222
++ambarella MACH_AMBARELLA AMBARELLA 1223
++nevis MACH_NEVIS NEVIS 1224
++htc_trinity MACH_HTC_TRINITY HTC_TRINITY 1225
++ql202b MACH_QL202B QL202B 1226
+ vpac270 MACH_VPAC270 VPAC270 1227
++rd129 MACH_RD129 RD129 1228
++htcwizard MACH_HTCWIZARD HTCWIZARD 1229
+ treo680 MACH_TREO680 TREO680 1230
++tecon_tmezon MACH_TECON_TMEZON TECON_TMEZON 1231
+ zylonite MACH_ZYLONITE ZYLONITE 1233
++gene1270 MACH_GENE1270 GENE1270 1234
++zir2412 MACH_ZIR2412 ZIR2412 1235
+ mx31lite MACH_MX31LITE MX31LITE 1236
++t700wx MACH_T700WX T700WX 1237
++vf100 MACH_VF100 VF100 1238
++nsb2 MACH_NSB2 NSB2 1239
++nxhmi_bb MACH_NXHMI_BB NXHMI_BB 1240
++nxhmi_re MACH_NXHMI_RE NXHMI_RE 1241
++n4100pro MACH_N4100PRO N4100PRO 1242
++sam9260 MACH_SAM9260 SAM9260 1243
++omap_treo600 MACH_OMAP_TREO600 OMAP_TREO600 1244
++indy2410 MACH_INDY2410 INDY2410 1245
++nelt_a MACH_NELT_A NELT_A 1246
++n311 MACH_N311 N311 1248
++at91sam9260vgk MACH_AT91SAM9260VGK AT91SAM9260VGK 1249
++at91leppe MACH_AT91LEPPE AT91LEPPE 1250
++at91lepccn MACH_AT91LEPCCN AT91LEPCCN 1251
++apc7100 MACH_APC7100 APC7100 1252
++stargazer MACH_STARGAZER STARGAZER 1253
++sonata MACH_SONATA SONATA 1254
++schmoogie MACH_SCHMOOGIE SCHMOOGIE 1255
++aztool MACH_AZTOOL AZTOOL 1256
+ mioa701 MACH_MIOA701 MIOA701 1257
++sxni9260 MACH_SXNI9260 SXNI9260 1258
++mxc27520evb MACH_MXC27520EVB MXC27520EVB 1259
+ armadillo5x0 MACH_ARMADILLO5X0 ARMADILLO5X0 1260
++mb9260 MACH_MB9260 MB9260 1261
++mb9263 MACH_MB9263 MB9263 1262
++ipac9302 MACH_IPAC9302 IPAC9302 1263
+ cc9p9360js MACH_CC9P9360JS CC9P9360JS 1264
++gallium MACH_GALLIUM GALLIUM 1265
++msc2410 MACH_MSC2410 MSC2410 1266
++ghi270 MACH_GHI270 GHI270 1267
++davinci_leonardo MACH_DAVINCI_LEONARDO DAVINCI_LEONARDO 1268
++oiab MACH_OIAB OIAB 1269
+ smdk6400 MACH_SMDK6400 SMDK6400 1270
+ nokia_n800 MACH_NOKIA_N800 NOKIA_N800 1271
++greenphone MACH_GREENPHONE GREENPHONE 1272
++compex42x MACH_COMPEXWP18 COMPEXWP18 1273
++xmate MACH_XMATE XMATE 1274
++energizer MACH_ENERGIZER ENERGIZER 1275
++ime1 MACH_IME1 IME1 1276
++sweda_tms MACH_SWEDATMS SWEDATMS 1277
++ntnp435c MACH_NTNP435C NTNP435C 1278
++spectro2 MACH_SPECTRO2 SPECTRO2 1279
++h6039 MACH_H6039 H6039 1280
+ ep80219 MACH_EP80219 EP80219 1281
++samoa_ii MACH_SAMOA_II SAMOA_II 1282
++cwmxl MACH_CWMXL CWMXL 1283
++as9200 MACH_AS9200 AS9200 1284
++sfx1149 MACH_SFX1149 SFX1149 1285
++navi010 MACH_NAVI010 NAVI010 1286
++multmdp MACH_MULTMDP MULTMDP 1287
++scb9520 MACH_SCB9520 SCB9520 1288
++htcathena MACH_HTCATHENA HTCATHENA 1289
++xp179 MACH_XP179 XP179 1290
++h4300 MACH_H4300 H4300 1291
+ goramo_mlr MACH_GORAMO_MLR GORAMO_MLR 1292
++mxc30020evb MACH_MXC30020EVB MXC30020EVB 1293
++adsbitsyg5 MACH_ADSBITSYG5 ADSBITSYG5 1294
++adsportalplus MACH_ADSPORTALPLUS ADSPORTALPLUS 1295
++mmsp2plus MACH_MMSP2PLUS MMSP2PLUS 1296
+ em_x270 MACH_EM_X270 EM_X270 1297
++tpp302 MACH_TPP302 TPP302 1298
++tpp104 MACH_TPM104 TPM104 1299
++tpm102 MACH_TPM102 TPM102 1300
++tpm109 MACH_TPM109 TPM109 1301
++fbxo1 MACH_FBXO1 FBXO1 1302
++hxd8 MACH_HXD8 HXD8 1303
+ neo1973_gta02 MACH_NEO1973_GTA02 NEO1973_GTA02 1304
++emtest MACH_EMTEST EMTEST 1305
++ad6900 MACH_AD6900 AD6900 1306
++europa MACH_EUROPA EUROPA 1307
++metroconnect MACH_METROCONNECT METROCONNECT 1308
++ez_s2410 MACH_EZ_S2410 EZ_S2410 1309
++ez_s2440 MACH_EZ_S2440 EZ_S2440 1310
++ez_ep9312 MACH_EZ_EP9312 EZ_EP9312 1311
++ez_ep9315 MACH_EZ_EP9315 EZ_EP9315 1312
++ez_x7 MACH_EZ_X7 EZ_X7 1313
++godotdb MACH_GODOTDB GODOTDB 1314
++mistral MACH_MISTRAL MISTRAL 1315
++msm MACH_MSM MSM 1316
++ct5910 MACH_CT5910 CT5910 1317
++ct5912 MACH_CT5912 CT5912 1318
++argonst_mp MACH_HYNET_INE HYNET_INE 1319
++hynet_app MACH_HYNET_APP HYNET_APP 1320
++msm7200 MACH_MSM7200 MSM7200 1321
++msm7600 MACH_MSM7600 MSM7600 1322
++ceb255 MACH_CEB255 CEB255 1323
++ciel MACH_CIEL CIEL 1324
++slm5650 MACH_SLM5650 SLM5650 1325
+ at91sam9rlek MACH_AT91SAM9RLEK AT91SAM9RLEK 1326
++comtech_router MACH_COMTECH_ROUTER COMTECH_ROUTER 1327
++sbc2410x MACH_SBC2410X SBC2410X 1328
++at4x0bd MACH_AT4X0BD AT4X0BD 1329
++cbifr MACH_CBIFR CBIFR 1330
++arcom_quantum MACH_ARCOM_QUANTUM ARCOM_QUANTUM 1331
++matrix520 MACH_MATRIX520 MATRIX520 1332
++matrix510 MACH_MATRIX510 MATRIX510 1333
++matrix500 MACH_MATRIX500 MATRIX500 1334
++m501 MACH_M501 M501 1335
++aaeon1270 MACH_AAEON1270 AAEON1270 1336
++matrix500ev MACH_MATRIX500EV MATRIX500EV 1337
++pac500 MACH_PAC500 PAC500 1338
++pnx8181 MACH_PNX8181 PNX8181 1339
+ colibri320 MACH_COLIBRI320 COLIBRI320 1340
++aztoolbb MACH_AZTOOLBB AZTOOLBB 1341
++aztoolg2 MACH_AZTOOLG2 AZTOOLG2 1342
++dvlhost MACH_DVLHOST DVLHOST 1343
++zir9200 MACH_ZIR9200 ZIR9200 1344
++zir9260 MACH_ZIR9260 ZIR9260 1345
++cocopah MACH_COCOPAH COCOPAH 1346
++nds MACH_NDS NDS 1347
++rosencrantz MACH_ROSENCRANTZ ROSENCRANTZ 1348
++fttx_odsc MACH_FTTX_ODSC FTTX_ODSC 1349
++classe_r6904 MACH_CLASSE_R6904 CLASSE_R6904 1350
+ cam60 MACH_CAM60 CAM60 1351
++mxc30031ads MACH_MXC30031ADS MXC30031ADS 1352
++datacall MACH_DATACALL DATACALL 1353
+ at91eb01 MACH_AT91EB01 AT91EB01 1354
++rty MACH_RTY RTY 1355
++dwl2100 MACH_DWL2100 DWL2100 1356
++vinsi MACH_VINSI VINSI 1357
+ db88f5281 MACH_DB88F5281 DB88F5281 1358
+ csb726 MACH_CSB726 CSB726 1359
++tik27 MACH_TIK27 TIK27 1360
++mx_uc7420 MACH_MX_UC7420 MX_UC7420 1361
++rirm3 MACH_RIRM3 RIRM3 1362
++pelco_odyssey MACH_PELCO_ODYSSEY PELCO_ODYSSEY 1363
++adx_abox MACH_ADX_ABOX ADX_ABOX 1365
++adx_tpid MACH_ADX_TPID ADX_TPID 1366
++minicheck MACH_MINICHECK MINICHECK 1367
++idam MACH_IDAM IDAM 1368
++mario_mx MACH_MARIO_MX MARIO_MX 1369
++vi1888 MACH_VI1888 VI1888 1370
++zr4230 MACH_ZR4230 ZR4230 1371
++t1_ix_blue MACH_T1_IX_BLUE T1_IX_BLUE 1372
++syhq2 MACH_SYHQ2 SYHQ2 1373
++computime_r3 MACH_COMPUTIME_R3 COMPUTIME_R3 1374
++oratis MACH_ORATIS ORATIS 1375
++mikko MACH_MIKKO MIKKO 1376
++holon MACH_HOLON HOLON 1377
++olip8 MACH_OLIP8 OLIP8 1378
++ghi270hg MACH_GHI270HG GHI270HG 1379
+ davinci_dm6467_evm MACH_DAVINCI_DM6467_EVM DAVINCI_DM6467_EVM 1380
+ davinci_dm355_evm MACH_DAVINCI_DM355_EVM DAVINCI_DM355_EVM 1381
++blackriver MACH_BLACKRIVER BLACKRIVER 1383
++sandgate_wp MACH_SANDGATEWP SANDGATEWP 1384
++cdotbwsg MACH_CDOTBWSG CDOTBWSG 1385
++quark963 MACH_QUARK963 QUARK963 1386
++csb735 MACH_CSB735 CSB735 1387
+ littleton MACH_LITTLETON LITTLETON 1388
++mio_p550 MACH_MIO_P550 MIO_P550 1389
++motion2440 MACH_MOTION2440 MOTION2440 1390
++imm500 MACH_IMM500 IMM500 1391
++homematic MACH_HOMEMATIC HOMEMATIC 1392
++ermine MACH_ERMINE ERMINE 1393
++kb9202b MACH_KB9202B KB9202B 1394
++hs1xx MACH_HS1XX HS1XX 1395
++studentmate2440 MACH_STUDENTMATE2440 STUDENTMATE2440 1396
++arvoo_l1_z1 MACH_ARVOO_L1_Z1 ARVOO_L1_Z1 1397
++dep2410k MACH_DEP2410K DEP2410K 1398
++xxsvideo MACH_XXSVIDEO XXSVIDEO 1399
++im4004 MACH_IM4004 IM4004 1400
++ochaya1050 MACH_OCHAYA1050 OCHAYA1050 1401
++lep9261 MACH_LEP9261 LEP9261 1402
++svenmeb MACH_SVENMEB SVENMEB 1403
++fortunet2ne MACH_FORTUNET2NE FORTUNET2NE 1404
++nxhx MACH_NXHX NXHX 1406
+ realview_pb11mp MACH_REALVIEW_PB11MP REALVIEW_PB11MP 1407
++ids500 MACH_IDS500 IDS500 1408
++ors_n725 MACH_ORS_N725 ORS_N725 1409
++hsdarm MACH_HSDARM HSDARM 1410
++sha_pon003 MACH_SHA_PON003 SHA_PON003 1411
++sha_pon004 MACH_SHA_PON004 SHA_PON004 1412
++sha_pon007 MACH_SHA_PON007 SHA_PON007 1413
++sha_pon011 MACH_SHA_PON011 SHA_PON011 1414
++h6042 MACH_H6042 H6042 1415
++h6043 MACH_H6043 H6043 1416
++looxc550 MACH_LOOXC550 LOOXC550 1417
++cnty_titan MACH_CNTY_TITAN CNTY_TITAN 1418
++app3xx MACH_APP3XX APP3XX 1419
++sideoatsgrama MACH_SIDEOATSGRAMA SIDEOATSGRAMA 1420
++treo700p MACH_TREO700P TREO700P 1421
++treo700w MACH_TREO700W TREO700W 1422
++treo750 MACH_TREO750 TREO750 1423
++treo755p MACH_TREO755P TREO755P 1424
++ezreganut9200 MACH_EZREGANUT9200 EZREGANUT9200 1425
++sarge MACH_SARGE SARGE 1426
++a696 MACH_A696 A696 1427
++turtle1916 MACH_TURTLE TURTLE 1428
+ mx27_3ds MACH_MX27_3DS MX27_3DS 1430
++bishop MACH_BISHOP BISHOP 1431
++pxx MACH_PXX PXX 1432
++redwood MACH_REDWOOD REDWOOD 1433
++omap_2430dlp MACH_OMAP_2430DLP OMAP_2430DLP 1436
++omap_2430osk MACH_OMAP_2430OSK OMAP_2430OSK 1437
++sardine MACH_SARDINE SARDINE 1438
+ halibut MACH_HALIBUT HALIBUT 1439
+ trout MACH_TROUT TROUT 1440
++goldfish MACH_GOLDFISH GOLDFISH 1441
++gesbc2440 MACH_GESBC2440 GESBC2440 1442
++nomad MACH_NOMAD NOMAD 1443
++rosalind MACH_ROSALIND ROSALIND 1444
++cc9p9215 MACH_CC9P9215 CC9P9215 1445
++cc9p9210 MACH_CC9P9210 CC9P9210 1446
++cc9p9215js MACH_CC9P9215JS CC9P9215JS 1447
++cc9p9210js MACH_CC9P9210JS CC9P9210JS 1448
++nasffe MACH_NASFFE NASFFE 1449
++tn2x0bd MACH_TN2X0BD TN2X0BD 1450
++gwmpxa MACH_GWMPXA GWMPXA 1451
++exyplus MACH_EXYPLUS EXYPLUS 1452
++jadoo21 MACH_JADOO21 JADOO21 1453
++looxn560 MACH_LOOXN560 LOOXN560 1454
++bonsai MACH_BONSAI BONSAI 1455
++adsmilgato MACH_ADSMILGATO ADSMILGATO 1456
++gba MACH_GBA GBA 1457
++h6044 MACH_H6044 H6044 1458
++app MACH_APP APP 1459
+ tct_hammer MACH_TCT_HAMMER TCT_HAMMER 1460
+ herald MACH_HERALD HERALD 1461
++artemis MACH_ARTEMIS ARTEMIS 1462
++htctitan MACH_HTCTITAN HTCTITAN 1463
++qranium MACH_QRANIUM QRANIUM 1464
++adx_wsc2 MACH_ADX_WSC2 ADX_WSC2 1465
++adx_medcom MACH_ADX_MEDCOM ADX_MEDCOM 1466
++bboard MACH_BBOARD BBOARD 1467
++cambria MACH_CAMBRIA CAMBRIA 1468
++mt7xxx MACH_MT7XXX MT7XXX 1469
++matrix512 MACH_MATRIX512 MATRIX512 1470
++matrix522 MACH_MATRIX522 MATRIX522 1471
++ipac5010 MACH_IPAC5010 IPAC5010 1472
++sakura MACH_SAKURA SAKURA 1473
++grocx MACH_GROCX GROCX 1474
++pm9263 MACH_PM9263 PM9263 1475
+ sim_one MACH_SIM_ONE SIM_ONE 1476
++acq132 MACH_ACQ132 ACQ132 1477
++datr MACH_DATR DATR 1478
++actux1 MACH_ACTUX1 ACTUX1 1479
++actux2 MACH_ACTUX2 ACTUX2 1480
++actux3 MACH_ACTUX3 ACTUX3 1481
++flexit MACH_FLEXIT FLEXIT 1482
++bh2x0bd MACH_BH2X0BD BH2X0BD 1483
++atb2002 MACH_ATB2002 ATB2002 1484
++xenon MACH_XENON XENON 1485
++fm607 MACH_FM607 FM607 1486
++matrix514 MACH_MATRIX514 MATRIX514 1487
++matrix524 MACH_MATRIX524 MATRIX524 1488
++inpod MACH_INPOD INPOD 1489
+ jive MACH_JIVE JIVE 1490
++tll_mx21 MACH_TLL_MX21 TLL_MX21 1491
++sbc2800 MACH_SBC2800 SBC2800 1492
++cc7ucamry MACH_CC7UCAMRY CC7UCAMRY 1493
++ubisys_p9_sc15 MACH_UBISYS_P9_SC15 UBISYS_P9_SC15 1494
++ubisys_p9_ssc2d10 MACH_UBISYS_P9_SSC2D10 UBISYS_P9_SSC2D10 1495
++ubisys_p9_rcu3 MACH_UBISYS_P9_RCU3 UBISYS_P9_RCU3 1496
++aml_m8000 MACH_AML_M8000 AML_M8000 1497
++snapper_270 MACH_SNAPPER_270 SNAPPER_270 1498
++omap_bbx MACH_OMAP_BBX OMAP_BBX 1499
++ucn2410 MACH_UCN2410 UCN2410 1500
+ sam9_l9260 MACH_SAM9_L9260 SAM9_L9260 1501
++eti_c2 MACH_ETI_C2 ETI_C2 1502
++avalanche MACH_AVALANCHE AVALANCHE 1503
+ realview_pb1176 MACH_REALVIEW_PB1176 REALVIEW_PB1176 1504
++dp1500 MACH_DP1500 DP1500 1505
++apple_iphone MACH_APPLE_IPHONE APPLE_IPHONE 1506
+ yl9200 MACH_YL9200 YL9200 1507
+ rd88f5182 MACH_RD88F5182 RD88F5182 1508
+ kurobox_pro MACH_KUROBOX_PRO KUROBOX_PRO 1509
++se_poet MACH_SE_POET SE_POET 1510
+ mx31_3ds MACH_MX31_3DS MX31_3DS 1511
++r270 MACH_R270 R270 1512
++armour21 MACH_ARMOUR21 ARMOUR21 1513
++dt2 MACH_DT2 DT2 1514
++vt4 MACH_VT4 VT4 1515
++tyco320 MACH_TYCO320 TYCO320 1516
++adma MACH_ADMA ADMA 1517
++wp188 MACH_WP188 WP188 1518
++corsica MACH_CORSICA CORSICA 1519
++bigeye MACH_BIGEYE BIGEYE 1520
++tll5000 MACH_TLL5000 TLL5000 1522
++bebot MACH_BEBOT BEBOT 1523
+ qong MACH_QONG QONG 1524
++tcompact MACH_TCOMPACT TCOMPACT 1525
++puma5 MACH_PUMA5 PUMA5 1526
++elara MACH_ELARA ELARA 1527
++ellington MACH_ELLINGTON ELLINGTON 1528
++xda_atom MACH_XDA_ATOM XDA_ATOM 1529
++energizer2 MACH_ENERGIZER2 ENERGIZER2 1530
++odin MACH_ODIN ODIN 1531
++actux4 MACH_ACTUX4 ACTUX4 1532
++esl_omap MACH_ESL_OMAP ESL_OMAP 1533
+ omap2evm MACH_OMAP2EVM OMAP2EVM 1534
+ omap3evm MACH_OMAP3EVM OMAP3EVM 1535
++adx_pcu57 MACH_ADX_PCU57 ADX_PCU57 1536
++monaco MACH_MONACO MONACO 1537
++levante MACH_LEVANTE LEVANTE 1538
++tmxipx425 MACH_TMXIPX425 TMXIPX425 1539
++leep MACH_LEEP LEEP 1540
++raad MACH_RAAD RAAD 1541
+ dns323 MACH_DNS323 DNS323 1542
++ap1000 MACH_AP1000 AP1000 1543
++a9sam6432 MACH_A9SAM6432 A9SAM6432 1544
++shiny MACH_SHINY SHINY 1545
+ omap3_beagle MACH_OMAP3_BEAGLE OMAP3_BEAGLE 1546
++csr_bdb2 MACH_CSR_BDB2 CSR_BDB2 1547
+ nokia_n810 MACH_NOKIA_N810 NOKIA_N810 1548
++c270 MACH_C270 C270 1549
++sentry MACH_SENTRY SENTRY 1550
+ pcm038 MACH_PCM038 PCM038 1551
++anc300 MACH_ANC300 ANC300 1552
++htckaiser MACH_HTCKAISER HTCKAISER 1553
++sbat100 MACH_SBAT100 SBAT100 1554
++modunorm MACH_MODUNORM MODUNORM 1555
++pelos_twarm MACH_PELOS_TWARM PELOS_TWARM 1556
++flank MACH_FLANK FLANK 1557
++sirloin MACH_SIRLOIN SIRLOIN 1558
++brisket MACH_BRISKET BRISKET 1559
++chuck MACH_CHUCK CHUCK 1560
++otter MACH_OTTER OTTER 1561
++davinci_ldk MACH_DAVINCI_LDK DAVINCI_LDK 1562
++phreedom MACH_PHREEDOM PHREEDOM 1563
++sg310 MACH_SG310 SG310 1564
+ ts209 MACH_TS209 TS209 1565
+ at91cap9adk MACH_AT91CAP9ADK AT91CAP9ADK 1566
++tion9315 MACH_TION9315 TION9315 1567
++mast MACH_MAST MAST 1568
++pfw MACH_PFW PFW 1569
++yl_p2440 MACH_YL_P2440 YL_P2440 1570
++zsbc32 MACH_ZSBC32 ZSBC32 1571
++omap_pace2 MACH_OMAP_PACE2 OMAP_PACE2 1572
++imx_pace2 MACH_IMX_PACE2 IMX_PACE2 1573
+ mx31moboard MACH_MX31MOBOARD MX31MOBOARD 1574
++mx37_3ds MACH_MX37_3DS MX37_3DS 1575
++rcc MACH_RCC RCC 1576
++dmp MACH_ARM9 ARM9 1577
+ vision_ep9307 MACH_VISION_EP9307 VISION_EP9307 1578
++scly1000 MACH_SCLY1000 SCLY1000 1579
++fontel_ep MACH_FONTEL_EP FONTEL_EP 1580
++voiceblue3g MACH_VOICEBLUE3G VOICEBLUE3G 1581
++tt9200 MACH_TT9200 TT9200 1582
++digi2410 MACH_DIGI2410 DIGI2410 1583
+ terastation_pro2 MACH_TERASTATION_PRO2 TERASTATION_PRO2 1584
+ linkstation_pro MACH_LINKSTATION_PRO LINKSTATION_PRO 1585
++motorola_a780 MACH_MOTOROLA_A780 MOTOROLA_A780 1587
++motorola_e6 MACH_MOTOROLA_E6 MOTOROLA_E6 1588
++motorola_e2 MACH_MOTOROLA_E2 MOTOROLA_E2 1589
++motorola_e680 MACH_MOTOROLA_E680 MOTOROLA_E680 1590
++ur2410 MACH_UR2410 UR2410 1591
++tas9261 MACH_TAS9261 TAS9261 1592
++davinci_hermes_hd MACH_HERMES_HD HERMES_HD 1593
++davinci_perseo_hd MACH_PERSEO_HD PERSEO_HD 1594
++stargazer2 MACH_STARGAZER2 STARGAZER2 1595
+ e350 MACH_E350 E350 1596
++wpcm450 MACH_WPCM450 WPCM450 1597
++cartesio MACH_CARTESIO CARTESIO 1598
++toybox MACH_TOYBOX TOYBOX 1599
++tx27 MACH_TX27 TX27 1600
+ ts409 MACH_TS409 TS409 1601
++p300 MACH_P300 P300 1602
++xdacomet MACH_XDACOMET XDACOMET 1603
++dexflex2 MACH_DEXFLEX2 DEXFLEX2 1604
++ow MACH_OW OW 1605
++armebs3 MACH_ARMEBS3 ARMEBS3 1606
++u3 MACH_U3 U3 1607
++smdk2450 MACH_SMDK2450 SMDK2450 1608
+ rsi_ews MACH_RSI_EWS RSI_EWS 1609
++tnb MACH_TNB TNB 1610
++toepath MACH_TOEPATH TOEPATH 1611
++kb9263 MACH_KB9263 KB9263 1612
++mt7108 MACH_MT7108 MT7108 1613
++smtr2440 MACH_SMTR2440 SMTR2440 1614
++manao MACH_MANAO MANAO 1615
+ cm_x300 MACH_CM_X300 CM_X300 1616
++gulfstream_kp MACH_GULFSTREAM_KP GULFSTREAM_KP 1617
++lanreadyfn522 MACH_LANREADYFN522 LANREADYFN522 1618
++arma37 MACH_ARMA37 ARMA37 1619
++mendel MACH_MENDEL MENDEL 1620
++pelco_iliad MACH_PELCO_ILIAD PELCO_ILIAD 1621
++unit2p MACH_UNIT2P UNIT2P 1622
++inc20otter MACH_INC20OTTER INC20OTTER 1623
+ at91sam9g20ek MACH_AT91SAM9G20EK AT91SAM9G20EK 1624
++sc_ge2 MACH_STORCENTER STORCENTER 1625
+ smdk6410 MACH_SMDK6410 SMDK6410 1626
+ u300 MACH_U300 U300 1627
++u500 MACH_U500 U500 1628
++ds9260 MACH_DS9260 DS9260 1629
++riverrock MACH_RIVERROCK RIVERROCK 1630
++scibath MACH_SCIBATH SCIBATH 1631
++at91sam7se MACH_AT91SAM7SE512EK AT91SAM7SE512EK 1632
+ wrt350n_v2 MACH_WRT350N_V2 WRT350N_V2 1633
++multimedia MACH_MULTIMEDIA MULTIMEDIA 1634
++marvin MACH_MARVIN MARVIN 1635
++x500 MACH_X500 X500 1636
++awlug4lcu MACH_AWLUG4LCU AWLUG4LCU 1637
++palermoc MACH_PALERMOC PALERMOC 1638
+ omap_ldp MACH_OMAP_LDP OMAP_LDP 1639
++ip500 MACH_IP500 IP500 1640
++ase2 MACH_ASE2 ASE2 1642
++mx35evb MACH_MX35EVB MX35EVB 1643
++aml_m8050 MACH_AML_M8050 AML_M8050 1644
+ mx35_3ds MACH_MX35_3DS MX35_3DS 1645
++mars MACH_MARS MARS 1646
+ neuros_osd2 MACH_NEUROS_OSD2 NEUROS_OSD2 1647
++badger MACH_BADGER BADGER 1648
+ trizeps4wl MACH_TRIZEPS4WL TRIZEPS4WL 1649
++trizeps5 MACH_TRIZEPS5 TRIZEPS5 1650
++marlin MACH_MARLIN MARLIN 1651
+ ts78xx MACH_TS78XX TS78XX 1652
++hpipaq214 MACH_HPIPAQ214 HPIPAQ214 1653
++at572d940dcm MACH_AT572D940DCM AT572D940DCM 1654
++ne1board MACH_NE1BOARD NE1BOARD 1655
++zante MACH_ZANTE ZANTE 1656
+ sffsdr MACH_SFFSDR SFFSDR 1657
++tw2662 MACH_TW2662 TW2662 1658
++vf10xx MACH_VF10XX VF10XX 1659
++zoran43xx MACH_ZORAN43XX ZORAN43XX 1660
++sonix926 MACH_SONIX926 SONIX926 1661
++celestialsemi MACH_CELESTIALSEMI CELESTIALSEMI 1662
++cc9m2443js MACH_CC9M2443JS CC9M2443JS 1663
++tw5334 MACH_TW5334 TW5334 1664
++omap_htcartemis MACH_HTCARTEMIS HTCARTEMIS 1665
++nal_hlite MACH_NAL_HLITE NAL_HLITE 1666
++htcvogue MACH_HTCVOGUE HTCVOGUE 1667
++smartweb MACH_SMARTWEB SMARTWEB 1668
++mv86xx MACH_MV86XX MV86XX 1669
++mv87xx MACH_MV87XX MV87XX 1670
++songyoungho MACH_SONGYOUNGHO SONGYOUNGHO 1671
++younghotema MACH_YOUNGHOTEMA YOUNGHOTEMA 1672
+ pcm037 MACH_PCM037 PCM037 1673
++mmvp MACH_MMVP MMVP 1674
++mmap MACH_MMAP MMAP 1675
++ptid2410 MACH_PTID2410 PTID2410 1676
++james_926 MACH_JAMES_926 JAMES_926 1677
++fm6000 MACH_FM6000 FM6000 1678
+ db88f6281_bp MACH_DB88F6281_BP DB88F6281_BP 1680
+ rd88f6192_nas MACH_RD88F6192_NAS RD88F6192_NAS 1681
+ rd88f6281 MACH_RD88F6281 RD88F6281 1682
+ db78x00_bp MACH_DB78X00_BP DB78X00_BP 1683
+ smdk2416 MACH_SMDK2416 SMDK2416 1685
++oce_spider_si MACH_OCE_SPIDER_SI OCE_SPIDER_SI 1686
++oce_spider_sk MACH_OCE_SPIDER_SK OCE_SPIDER_SK 1687
++rovern6 MACH_ROVERN6 ROVERN6 1688
++pelco_evolution MACH_PELCO_EVOLUTION PELCO_EVOLUTION 1689
+ wbd111 MACH_WBD111 WBD111 1690
++elaracpe MACH_ELARACPE ELARACPE 1691
++mabv3 MACH_MABV3 MABV3 1692
+ mv2120 MACH_MV2120 MV2120 1693
++csb737 MACH_CSB737 CSB737 1695
+ mx51_3ds MACH_MX51_3DS MX51_3DS 1696
++g900 MACH_G900 G900 1697
++apf27 MACH_APF27 APF27 1698
++ggus2000 MACH_GGUS2000 GGUS2000 1699
++omap_2430_mimic MACH_OMAP_2430_MIMIC OMAP_2430_MIMIC 1700
+ imx27lite MACH_IMX27LITE IMX27LITE 1701
++almex MACH_ALMEX ALMEX 1702
++control MACH_CONTROL CONTROL 1703
++mba2410 MACH_MBA2410 MBA2410 1704
++volcano MACH_VOLCANO VOLCANO 1705
++zenith MACH_ZENITH ZENITH 1706
++muchip MACH_MUCHIP MUCHIP 1707
++magellan MACH_MAGELLAN MAGELLAN 1708
+ usb_a9260 MACH_USB_A9260 USB_A9260 1709
+ usb_a9263 MACH_USB_A9263 USB_A9263 1710
+ qil_a9260 MACH_QIL_A9260 QIL_A9260 1711
++cme9210 MACH_CME9210 CME9210 1712
++hczh4 MACH_HCZH4 HCZH4 1713
++spearbasic MACH_SPEARBASIC SPEARBASIC 1714
++dep2440 MACH_DEP2440 DEP2440 1715
++hdl_gxr MACH_HDL_GXR HDL_GXR 1716
++hdl_gt MACH_HDL_GT HDL_GT 1717
++hdl_4g MACH_HDL_4G HDL_4G 1718
++s3c6000 MACH_S3C6000 S3C6000 1719
++mmsp2_mdk MACH_MMSP2_MDK MMSP2_MDK 1720
++mpx220 MACH_MPX220 MPX220 1721
+ kzm_arm11_01 MACH_KZM_ARM11_01 KZM_ARM11_01 1722
++htc_polaris MACH_HTC_POLARIS HTC_POLARIS 1723
++htc_kaiser MACH_HTC_KAISER HTC_KAISER 1724
++lg_ks20 MACH_LG_KS20 LG_KS20 1725
++hhgps MACH_HHGPS HHGPS 1726
+ nokia_n810_wimax MACH_NOKIA_N810_WIMAX NOKIA_N810_WIMAX 1727
++insight MACH_INSIGHT INSIGHT 1728
+ sapphire MACH_SAPPHIRE SAPPHIRE 1729
++csb637xo MACH_CSB637XO CSB637XO 1730
++evisiong MACH_EVISIONG EVISIONG 1731
+ stmp37xx MACH_STMP37XX STMP37XX 1732
+ stmp378x MACH_STMP378X STMP378X 1733
++tnt MACH_TNT TNT 1734
++tbxt MACH_TBXT TBXT 1735
++playmate MACH_PLAYMATE PLAYMATE 1736
++pns10 MACH_PNS10 PNS10 1737
++eznavi MACH_EZNAVI EZNAVI 1738
++ps4000 MACH_PS4000 PS4000 1739
+ ezx_a780 MACH_EZX_A780 EZX_A780 1740
+ ezx_e680 MACH_EZX_E680 EZX_E680 1741
+ ezx_a1200 MACH_EZX_A1200 EZX_A1200 1742
+ ezx_e6 MACH_EZX_E6 EZX_E6 1743
+ ezx_e2 MACH_EZX_E2 EZX_E2 1744
+ ezx_a910 MACH_EZX_A910 EZX_A910 1745
++cwmx31 MACH_CWMX31 CWMX31 1746
++sl2312 MACH_SL2312 SL2312 1747
++blenny MACH_BLENNY BLENNY 1748
++ds107 MACH_DS107 DS107 1749
++dsx07 MACH_DSX07 DSX07 1750
++picocom1 MACH_PICOCOM1 PICOCOM1 1751
++lynx_wolverine MACH_LYNX_WOLVERINE LYNX_WOLVERINE 1752
++ubisys_p9_sc19 MACH_UBISYS_P9_SC19 UBISYS_P9_SC19 1753
++kratos_low MACH_KRATOS_LOW KRATOS_LOW 1754
++m700 MACH_M700 M700 1755
+ edmini_v2 MACH_EDMINI_V2 EDMINI_V2 1756
+ zipit2 MACH_ZIPIT2 ZIPIT2 1757
++hslfemtocell MACH_HSLFEMTOCELL HSLFEMTOCELL 1758
++daintree_at91 MACH_DAINTREE_AT91 DAINTREE_AT91 1759
++sg560usb MACH_SG560USB SG560USB 1760
+ omap3_pandora MACH_OMAP3_PANDORA OMAP3_PANDORA 1761
++usr8200 MACH_USR8200 USR8200 1762
++s1s65k MACH_S1S65K S1S65K 1763
++s2s65a MACH_S2S65A S2S65A 1764
++icore MACH_ICORE ICORE 1765
+ mss2 MACH_MSS2 MSS2 1766
++belmont MACH_BELMONT BELMONT 1767
++asusp525 MACH_ASUSP525 ASUSP525 1768
+ lb88rc8480 MACH_LB88RC8480 LB88RC8480 1769
++hipxa MACH_HIPXA HIPXA 1770
+ mx25_3ds MACH_MX25_3DS MX25_3DS 1771
++m800 MACH_M800 M800 1772
+ omap3530_lv_som MACH_OMAP3530_LV_SOM OMAP3530_LV_SOM 1773
++prima_evb MACH_PRIMA_EVB PRIMA_EVB 1774
++mx31bt1 MACH_MX31BT1 MX31BT1 1775
++atlas4_evb MACH_ATLAS4_EVB ATLAS4_EVB 1776
++mx31cicada MACH_MX31CICADA MX31CICADA 1777
++mi424wr MACH_MI424WR MI424WR 1778
++axs_ultrax MACH_AXS_ULTRAX AXS_ULTRAX 1779
++at572d940deb MACH_AT572D940DEB AT572D940DEB 1780
+ davinci_da830_evm MACH_DAVINCI_DA830_EVM DAVINCI_DA830_EVM 1781
++ep9302 MACH_EP9302 EP9302 1782
++cybook3 MACH_CYBOOK3 CYBOOK3 1784
++wdg002 MACH_WDG002 WDG002 1785
++sg560adsl MACH_SG560ADSL SG560ADSL 1786
++nextio_n2800_ica MACH_NEXTIO_N2800_ICA NEXTIO_N2800_ICA 1787
+ dove_db MACH_DOVE_DB DOVE_DB 1788
++vandihud MACH_VANDIHUD VANDIHUD 1790
++magx_e8 MACH_MAGX_E8 MAGX_E8 1791
++magx_z6 MACH_MAGX_Z6 MAGX_Z6 1792
++magx_v8 MACH_MAGX_V8 MAGX_V8 1793
++magx_u9 MACH_MAGX_U9 MAGX_U9 1794
++toughcf08 MACH_TOUGHCF08 TOUGHCF08 1795
++zw4400 MACH_ZW4400 ZW4400 1796
++marat91 MACH_MARAT91 MARAT91 1797
+ overo MACH_OVERO OVERO 1798
+ at2440evb MACH_AT2440EVB AT2440EVB 1799
+ neocore926 MACH_NEOCORE926 NEOCORE926 1800
+ wnr854t MACH_WNR854T WNR854T 1801
++imx27 MACH_IMX27 IMX27 1802
++moose_db MACH_MOOSE_DB MOOSE_DB 1803
++fab4 MACH_FAB4 FAB4 1804
++htcdiamond MACH_HTCDIAMOND HTCDIAMOND 1805
++fiona MACH_FIONA FIONA 1806
++mxc30030_x MACH_MXC30030_X MXC30030_X 1807
++bmp1000 MACH_BMP1000 BMP1000 1808
++logi9200 MACH_LOGI9200 LOGI9200 1809
++tqma31 MACH_TQMA31 TQMA31 1810
++ccw9p9215js MACH_CCW9P9215JS CCW9P9215JS 1811
+ rd88f5181l_ge MACH_RD88F5181L_GE RD88F5181L_GE 1812
++sifmain MACH_SIFMAIN SIFMAIN 1813
++sam9_l9261 MACH_SAM9_L9261 SAM9_L9261 1814
++cc9m2443 MACH_CC9M2443 CC9M2443 1815
++xaria300 MACH_XARIA300 XARIA300 1816
++it9200 MACH_IT9200 IT9200 1817
+ rd88f5181l_fxo MACH_RD88F5181L_FXO RD88F5181L_FXO 1818
++kriss_sensor MACH_KRISS_SENSOR KRISS_SENSOR 1819
++pilz_pmi5 MACH_PILZ_PMI5 PILZ_PMI5 1820
++jade MACH_JADE JADE 1821
++ks8695_softplc MACH_KS8695_SOFTPLC KS8695_SOFTPLC 1822
++gprisc3 MACH_GPRISC3 GPRISC3 1823
+ stamp9g20 MACH_STAMP9G20 STAMP9G20 1824
++smdk6430 MACH_SMDK6430 SMDK6430 1825
+ smdkc100 MACH_SMDKC100 SMDKC100 1826
+ tavorevb MACH_TAVOREVB TAVOREVB 1827
+ saar MACH_SAAR SAAR 1828
++deister_eyecam MACH_DEISTER_EYECAM DEISTER_EYECAM 1829
+ at91sam9m10g45ek MACH_AT91SAM9M10G45EK AT91SAM9M10G45EK 1830
++linkstation_produo MACH_LINKSTATION_PRODUO LINKSTATION_PRODUO 1831
++hit_b0 MACH_HIT_B0 HIT_B0 1832
++adx_rmu MACH_ADX_RMU ADX_RMU 1833
++xg_cpe_main MACH_XG_CPE_MAIN XG_CPE_MAIN 1834
++edb9407a MACH_EDB9407A EDB9407A 1835
++dtb9608 MACH_DTB9608 DTB9608 1836
++em104v1 MACH_EM104V1 EM104V1 1837
++demo MACH_DEMO DEMO 1838
++logi9260 MACH_LOGI9260 LOGI9260 1839
++mx31_exm32 MACH_MX31_EXM32 MX31_EXM32 1840
+ usb_a9g20 MACH_USB_A9G20 USB_A9G20 1841
++picproje2008 MACH_PICPROJE2008 PICPROJE2008 1842
++cs_e9315 MACH_CS_E9315 CS_E9315 1843
++qil_a9g20 MACH_QIL_A9G20 QIL_A9G20 1844
++sha_pon020 MACH_SHA_PON020 SHA_PON020 1845
++nad MACH_NAD NAD 1846
++sbc35_a9260 MACH_SBC35_A9260 SBC35_A9260 1847
++sbc35_a9g20 MACH_SBC35_A9G20 SBC35_A9G20 1848
++davinci_beginning MACH_DAVINCI_BEGINNING DAVINCI_BEGINNING 1849
++uwc MACH_UWC UWC 1850
+ mxlads MACH_MXLADS MXLADS 1851
++htcnike MACH_HTCNIKE HTCNIKE 1852
++deister_pxa270 MACH_DEISTER_PXA270 DEISTER_PXA270 1853
++cme9210js MACH_CME9210JS CME9210JS 1854
++cc9p9360 MACH_CC9P9360 CC9P9360 1855
++mocha MACH_MOCHA MOCHA 1856
++wapd170ag MACH_WAPD170AG WAPD170AG 1857
+ linkstation_mini MACH_LINKSTATION_MINI LINKSTATION_MINI 1858
+ afeb9260 MACH_AFEB9260 AFEB9260 1859
++w90x900 MACH_W90X900 W90X900 1860
++w90x700 MACH_W90X700 W90X700 1861
++kt300ip MACH_KT300IP KT300IP 1862
++kt300ip_g20 MACH_KT300IP_G20 KT300IP_G20 1863
++srcm MACH_SRCM SRCM 1864
++wlnx_9260 MACH_WLNX_9260 WLNX_9260 1865
++openmoko_gta03 MACH_OPENMOKO_GTA03 OPENMOKO_GTA03 1866
++osprey2 MACH_OSPREY2 OSPREY2 1867
++kbio9260 MACH_KBIO9260 KBIO9260 1868
++ginza MACH_GINZA GINZA 1869
++a636n MACH_A636N A636N 1870
+ imx27ipcam MACH_IMX27IPCAM IMX27IPCAM 1871
++nemoc MACH_NEMOC NEMOC 1872
++geneva MACH_GENEVA GENEVA 1873
++htcpharos MACH_HTCPHAROS HTCPHAROS 1874
++neonc MACH_NEONC NEONC 1875
++nas7100 MACH_NAS7100 NAS7100 1876
++teuphone MACH_TEUPHONE TEUPHONE 1877
++annax_eth2 MACH_ANNAX_ETH2 ANNAX_ETH2 1878
++csb733 MACH_CSB733 CSB733 1879
++bk3 MACH_BK3 BK3 1880
++omap_em32 MACH_OMAP_EM32 OMAP_EM32 1881
++et9261cp MACH_ET9261CP ET9261CP 1882
++jasperc MACH_JASPERC JASPERC 1883
++issi_arm9 MACH_ISSI_ARM9 ISSI_ARM9 1884
++ued MACH_UED UED 1885
++esiblade MACH_ESIBLADE ESIBLADE 1886
++eye02 MACH_EYE02 EYE02 1887
++imx27kbd MACH_IMX27KBD IMX27KBD 1888
++kixvp435 MACH_KIXVP435 KIXVP435 1890
++kixnp435 MACH_KIXNP435 KIXNP435 1891
++africa MACH_AFRICA AFRICA 1892
++nh233 MACH_NH233 NH233 1893
+ rd88f6183ap_ge MACH_RD88F6183AP_GE RD88F6183AP_GE 1894
++bcm4760 MACH_BCM4760 BCM4760 1895
++eddy_v2 MACH_EDDY_V2 EDDY_V2 1896
+ realview_pba8 MACH_REALVIEW_PBA8 REALVIEW_PBA8 1897
++hid_a7 MACH_HID_A7 HID_A7 1898
++hero MACH_HERO HERO 1899
++omap_poseidon MACH_OMAP_POSEIDON OMAP_POSEIDON 1900
+ realview_pbx MACH_REALVIEW_PBX REALVIEW_PBX 1901
+ micro9s MACH_MICRO9S MICRO9S 1902
++mako MACH_MAKO MAKO 1903
++xdaflame MACH_XDAFLAME XDAFLAME 1904
++phidget_sbc2 MACH_PHIDGET_SBC2 PHIDGET_SBC2 1905
++limestone MACH_LIMESTONE LIMESTONE 1906
++iprobe_c32 MACH_IPROBE_C32 IPROBE_C32 1907
+ rut100 MACH_RUT100 RUT100 1908
++asusp535 MACH_ASUSP535 ASUSP535 1909
++htcraphael MACH_HTCRAPHAEL HTCRAPHAEL 1910
++sygdg1 MACH_SYGDG1 SYGDG1 1911
++sygdg2 MACH_SYGDG2 SYGDG2 1912
++seoul MACH_SEOUL SEOUL 1913
++salerno MACH_SALERNO SALERNO 1914
++ucn_s3c64xx MACH_UCN_S3C64XX UCN_S3C64XX 1915
++msm7201a MACH_MSM7201A MSM7201A 1916
++lpr1 MACH_LPR1 LPR1 1917
++armadillo500fx MACH_ARMADILLO500FX ARMADILLO500FX 1918
+ g3evm MACH_G3EVM G3EVM 1919
++z3_dm355 MACH_Z3_DM355 Z3_DM355 1920
+ w90p910evb MACH_W90P910EVB W90P910EVB 1921
++w90p920evb MACH_W90P920EVB W90P920EVB 1922
+ w90p950evb MACH_W90P950EVB W90P950EVB 1923
+ w90n960evb MACH_W90N960EVB W90N960EVB 1924
++camhd MACH_CAMHD CAMHD 1925
++mvc100 MACH_MVC100 MVC100 1926
++electrum_200 MACH_ELECTRUM_200 ELECTRUM_200 1927
++htcjade MACH_HTCJADE HTCJADE 1928
++memphis MACH_MEMPHIS MEMPHIS 1929
++imx27sbc MACH_IMX27SBC IMX27SBC 1930
++lextar MACH_LEXTAR LEXTAR 1931
+ mv88f6281gtw_ge MACH_MV88F6281GTW_GE MV88F6281GTW_GE 1932
+ ncp MACH_NCP NCP 1933
++z32an_series MACH_Z32AN Z32AN 1934
++tmq_capd MACH_TMQ_CAPD TMQ_CAPD 1935
++omap3_wl MACH_OMAP3_WL OMAP3_WL 1936
++chumby MACH_CHUMBY CHUMBY 1937
++atsarm9 MACH_ATSARM9 ATSARM9 1938
+ davinci_dm365_evm MACH_DAVINCI_DM365_EVM DAVINCI_DM365_EVM 1939
++bahamas MACH_BAHAMAS BAHAMAS 1940
++das MACH_DAS DAS 1941
++minidas MACH_MINIDAS MINIDAS 1942
++vk1000 MACH_VK1000 VK1000 1943
+ centro MACH_CENTRO CENTRO 1944
++ctera_2bay MACH_CTERA_2BAY CTERA_2BAY 1945
++edgeconnect MACH_EDGECONNECT EDGECONNECT 1946
++nd27000 MACH_ND27000 ND27000 1947
++cobra MACH_GEMALTO_COBRA GEMALTO_COBRA 1948
++ingelabs_comet MACH_INGELABS_COMET INGELABS_COMET 1949
++pollux_wiz MACH_POLLUX_WIZ POLLUX_WIZ 1950
++blackstone MACH_BLACKSTONE BLACKSTONE 1951
++topaz MACH_TOPAZ TOPAZ 1952
++aixle MACH_AIXLE AIXLE 1953
++mw998 MACH_MW998 MW998 1954
+ nokia_rx51 MACH_NOKIA_RX51 NOKIA_RX51 1955
++vsc5605ev MACH_VSC5605EV VSC5605EV 1956
++nt98700dk MACH_NT98700DK NT98700DK 1957
++icontact MACH_ICONTACT ICONTACT 1958
++swarco_frcpu MACH_SWARCO_FRCPU SWARCO_FRCPU 1959
++swarco_scpu MACH_SWARCO_SCPU SWARCO_SCPU 1960
++bbox_p16 MACH_BBOX_P16 BBOX_P16 1961
++bstd MACH_BSTD BSTD 1962
++sbc2440ii MACH_SBC2440II SBC2440II 1963
++pcm034 MACH_PCM034 PCM034 1964
++neso MACH_NESO NESO 1965
++wlnx_9g20 MACH_WLNX_9G20 WLNX_9G20 1966
+ omap_zoom2 MACH_OMAP_ZOOM2 OMAP_ZOOM2 1967
++totemnova MACH_TOTEMNOVA TOTEMNOVA 1968
++c5000 MACH_C5000 C5000 1969
++unipo_at91sam9263 MACH_UNIPO_AT91SAM9263 UNIPO_AT91SAM9263 1970
++ethernut5 MACH_ETHERNUT5 ETHERNUT5 1971
++arm11 MACH_ARM11 ARM11 1972
+ cpuat9260 MACH_CPUAT9260 CPUAT9260 1973
++cpupxa255 MACH_CPUPXA255 CPUPXA255 1974
+ eukrea_cpuimx27 MACH_EUKREA_CPUIMX27 EUKREA_CPUIMX27 1975
++cheflux MACH_CHEFLUX CHEFLUX 1976
++eb_cpux9k2 MACH_EB_CPUX9K2 EB_CPUX9K2 1977
++opcotec MACH_OPCOTEC OPCOTEC 1978
++yt MACH_YT YT 1979
++motoq MACH_MOTOQ MOTOQ 1980
++bsb1 MACH_BSB1 BSB1 1981
+ acs5k MACH_ACS5K ACS5K 1982
++milan MACH_MILAN MILAN 1983
++quartzv2 MACH_QUARTZV2 QUARTZV2 1984
++rsvp MACH_RSVP RSVP 1985
++rmp200 MACH_RMP200 RMP200 1986
+ snapper_9260 MACH_SNAPPER_9260 SNAPPER_9260 1987
+ dsm320 MACH_DSM320 DSM320 1988
++adsgcm MACH_ADSGCM ADSGCM 1989
++ase2_400 MACH_ASE2_400 ASE2_400 1990
++pizza MACH_PIZZA PIZZA 1991
++spot_ngpl MACH_SPOT_NGPL SPOT_NGPL 1992
++armata MACH_ARMATA ARMATA 1993
+ exeda MACH_EXEDA EXEDA 1994
++mx31sf005 MACH_MX31SF005 MX31SF005 1995
++f5d8231_4_v2 MACH_F5D8231_4_V2 F5D8231_4_V2 1996
++q2440 MACH_Q2440 Q2440 1997
++qq2440 MACH_QQ2440 QQ2440 1998
+ mini2440 MACH_MINI2440 MINI2440 1999
+ colibri300 MACH_COLIBRI300 COLIBRI300 2000
++jades MACH_JADES JADES 2001
++spark MACH_SPARK SPARK 2002
++benzina MACH_BENZINA BENZINA 2003
++blaze MACH_BLAZE BLAZE 2004
+ linkstation_ls_hgl MACH_LINKSTATION_LS_HGL LINKSTATION_LS_HGL 2005
++htckovsky MACH_HTCKOVSKY HTCKOVSKY 2006
++sony_prs505 MACH_SONY_PRS505 SONY_PRS505 2007
++hanlin_v3 MACH_HANLIN_V3 HANLIN_V3 2008
++sapphira MACH_SAPPHIRA SAPPHIRA 2009
++dack_sda_01 MACH_DACK_SDA_01 DACK_SDA_01 2010
++armbox MACH_ARMBOX ARMBOX 2011
++harris_rvp MACH_HARRIS_RVP HARRIS_RVP 2012
++ribaldo MACH_RIBALDO RIBALDO 2013
++agora MACH_AGORA AGORA 2014
++omap3_mini MACH_OMAP3_MINI OMAP3_MINI 2015
++a9sam6432_b MACH_A9SAM6432_B A9SAM6432_B 2016
++usg2410 MACH_USG2410 USG2410 2017
++pc72052_i10_revb MACH_PC72052_I10_REVB PC72052_I10_REVB 2018
++mx35_exm32 MACH_MX35_EXM32 MX35_EXM32 2019
++topas910 MACH_TOPAS910 TOPAS910 2020
++hyena MACH_HYENA HYENA 2021
++pospax MACH_POSPAX POSPAX 2022
++hdl_gx MACH_HDL_GX HDL_GX 2023
++ctera_4bay MACH_CTERA_4BAY CTERA_4BAY 2024
++ctera_plug_c MACH_CTERA_PLUG_C CTERA_PLUG_C 2025
++crwea_plug_i MACH_CRWEA_PLUG_I CRWEA_PLUG_I 2026
++egauge2 MACH_EGAUGE2 EGAUGE2 2027
++didj MACH_DIDJ DIDJ 2028
++m_s3c2443 MACH_MEISTER MEISTER 2029
++htcblackstone MACH_HTCBLACKSTONE HTCBLACKSTONE 2030
+ cpuat9g20 MACH_CPUAT9G20 CPUAT9G20 2031
+ smdk6440 MACH_SMDK6440 SMDK6440 2032
++omap_35xx_mvp MACH_OMAP_35XX_MVP OMAP_35XX_MVP 2033
++ctera_plug_i MACH_CTERA_PLUG_I CTERA_PLUG_I 2034
++pvg610_100 MACH_PVG610 PVG610 2035
++hprw6815 MACH_HPRW6815 HPRW6815 2036
++omap3_oswald MACH_OMAP3_OSWALD OMAP3_OSWALD 2037
+ nas4220b MACH_NAS4220B NAS4220B 2038
++htcraphael_cdma MACH_HTCRAPHAEL_CDMA HTCRAPHAEL_CDMA 2039
++htcdiamond_cdma MACH_HTCDIAMOND_CDMA HTCDIAMOND_CDMA 2040
++scaler MACH_SCALER SCALER 2041
+ zylonite2 MACH_ZYLONITE2 ZYLONITE2 2042
+ aspenite MACH_ASPENITE ASPENITE 2043
++teton MACH_TETON TETON 2044
+ ttc_dkb MACH_TTC_DKB TTC_DKB 2045
++bishop2 MACH_BISHOP2 BISHOP2 2046
++ippv5 MACH_IPPV5 IPPV5 2047
++farm926 MACH_FARM926 FARM926 2048
++mmccpu MACH_MMCCPU MMCCPU 2049
++sgmsfl MACH_SGMSFL SGMSFL 2050
++tt8000 MACH_TT8000 TT8000 2051
++zrn4300lp MACH_ZRN4300LP ZRN4300LP 2052
++mptc MACH_MPTC MPTC 2053
++h6051 MACH_H6051 H6051 2054
++pvg610_101 MACH_PVG610_101 PVG610_101 2055
++stamp9261_pc_evb MACH_STAMP9261_PC_EVB STAMP9261_PC_EVB 2056
++pelco_odysseus MACH_PELCO_ODYSSEUS PELCO_ODYSSEUS 2057
++tny_a9260 MACH_TNY_A9260 TNY_A9260 2058
++tny_a9g20 MACH_TNY_A9G20 TNY_A9G20 2059
++aesop_mp2530f MACH_AESOP_MP2530F AESOP_MP2530F 2060
++dx900 MACH_DX900 DX900 2061
++cpodc2 MACH_CPODC2 CPODC2 2062
++tilt_8925 MACH_TILT_8925 TILT_8925 2063
++davinci_dm357_evm MACH_DAVINCI_DM357_EVM DAVINCI_DM357_EVM 2064
++swordfish MACH_SWORDFISH SWORDFISH 2065
++corvus MACH_CORVUS CORVUS 2066
++taurus MACH_TAURUS TAURUS 2067
++axm MACH_AXM AXM 2068
++axc MACH_AXC AXC 2069
++baby MACH_BABY BABY 2070
++mp200 MACH_MP200 MP200 2071
+ pcm043 MACH_PCM043 PCM043 2072
++hanlin_v3c MACH_HANLIN_V3C HANLIN_V3C 2073
++kbk9g20 MACH_KBK9G20 KBK9G20 2074
++adsturbog5 MACH_ADSTURBOG5 ADSTURBOG5 2075
++avenger_lite1 MACH_AVENGER_LITE1 AVENGER_LITE1 2076
++suc82x MACH_SUC SUC 2077
++at91sam7s256 MACH_AT91SAM7S256 AT91SAM7S256 2078
++mendoza MACH_MENDOZA MENDOZA 2079
++kira MACH_KIRA KIRA 2080
++mx1hbm MACH_MX1HBM MX1HBM 2081
++quatro43xx MACH_QUATRO43XX QUATRO43XX 2082
++quatro4230 MACH_QUATRO4230 QUATRO4230 2083
++nsb400 MACH_NSB400 NSB400 2084
++drp255 MACH_DRP255 DRP255 2085
++thoth MACH_THOTH THOTH 2086
++firestone MACH_FIRESTONE FIRESTONE 2087
++asusp750 MACH_ASUSP750 ASUSP750 2088
++ctera_dl MACH_CTERA_DL CTERA_DL 2089
++socr MACH_SOCR SOCR 2090
++htcoxygen MACH_HTCOXYGEN HTCOXYGEN 2091
++heroc MACH_HEROC HEROC 2092
++zeno6800 MACH_ZENO6800 ZENO6800 2093
++sc2mcs MACH_SC2MCS SC2MCS 2094
++gene100 MACH_GENE100 GENE100 2095
++as353x MACH_AS353X AS353X 2096
+ sheevaplug MACH_SHEEVAPLUG SHEEVAPLUG 2097
++at91sam9g20 MACH_AT91SAM9G20 AT91SAM9G20 2098
++mv88f6192gtw_fe MACH_MV88F6192GTW_FE MV88F6192GTW_FE 2099
++cc9200 MACH_CC9200 CC9200 2100
++sm9200 MACH_SM9200 SM9200 2101
++tp9200 MACH_TP9200 TP9200 2102
++snapperdv MACH_SNAPPERDV SNAPPERDV 2103
+ avengers_lite MACH_AVENGERS_LITE AVENGERS_LITE 2104
++avengers_lite1 MACH_AVENGERS_LITE1 AVENGERS_LITE1 2105
++omap3axon MACH_OMAP3AXON OMAP3AXON 2106
++ma8xx MACH_MA8XX MA8XX 2107
++mp201ek MACH_MP201EK MP201EK 2108
++davinci_tux MACH_DAVINCI_TUX DAVINCI_TUX 2109
++mpa1600 MACH_MPA1600 MPA1600 2110
++pelco_troy MACH_PELCO_TROY PELCO_TROY 2111
++nsb667 MACH_NSB667 NSB667 2112
++rovers5_4mpix MACH_ROVERS5_4MPIX ROVERS5_4MPIX 2113
++twocom MACH_TWOCOM TWOCOM 2114
++ubisys_p9_rcu3r2 MACH_UBISYS_P9_RCU3R2 UBISYS_P9_RCU3R2 2115
++hero_espresso MACH_HERO_ESPRESSO HERO_ESPRESSO 2116
++afeusb MACH_AFEUSB AFEUSB 2117
++t830 MACH_T830 T830 2118
++spd8020_cc MACH_SPD8020_CC SPD8020_CC 2119
++om_3d7k MACH_OM_3D7K OM_3D7K 2120
++picocom2 MACH_PICOCOM2 PICOCOM2 2121
++uwg4mx27 MACH_UWG4MX27 UWG4MX27 2122
++uwg4mx31 MACH_UWG4MX31 UWG4MX31 2123
++cherry MACH_CHERRY CHERRY 2124
+ mx51_babbage MACH_MX51_BABBAGE MX51_BABBAGE 2125
++s3c2440turkiye MACH_S3C2440TURKIYE S3C2440TURKIYE 2126
+ tx37 MACH_TX37 TX37 2127
++sbc2800_9g20 MACH_SBC2800_9G20 SBC2800_9G20 2128
++benzglb MACH_BENZGLB BENZGLB 2129
++benztd MACH_BENZTD BENZTD 2130
++cartesio_plus MACH_CARTESIO_PLUS CARTESIO_PLUS 2131
++solrad_g20 MACH_SOLRAD_G20 SOLRAD_G20 2132
++mx27wallace MACH_MX27WALLACE MX27WALLACE 2133
++fmzwebmodul MACH_FMZWEBMODUL FMZWEBMODUL 2134
+ rd78x00_masa MACH_RD78X00_MASA RD78X00_MASA 2135
++smallogger MACH_SMALLOGGER SMALLOGGER 2136
++ccw9p9215 MACH_CCW9P9215 CCW9P9215 2137
+ dm355_leopard MACH_DM355_LEOPARD DM355_LEOPARD 2138
+ ts219 MACH_TS219 TS219 2139
++tny_a9263 MACH_TNY_A9263 TNY_A9263 2140
++apollo MACH_APOLLO APOLLO 2141
++at91cap9stk MACH_AT91CAP9STK AT91CAP9STK 2142
++spc300 MACH_SPC300 SPC300 2143
++eko MACH_EKO EKO 2144
++ccw9m2443 MACH_CCW9M2443 CCW9M2443 2145
++ccw9m2443js MACH_CCW9M2443JS CCW9M2443JS 2146
++m2m_router_device MACH_M2M_ROUTER_DEVICE M2M_ROUTER_DEVICE 2147
++str9104nas MACH_STAR9104NAS STAR9104NAS 2148
+ pca100 MACH_PCA100 PCA100 2149
++z3_dm365_mod_01 MACH_Z3_DM365_MOD_01 Z3_DM365_MOD_01 2150
++hipox MACH_HIPOX HIPOX 2151
++omap3_piteds MACH_OMAP3_PITEDS OMAP3_PITEDS 2152
++bm150r MACH_BM150R BM150R 2153
++tbone MACH_TBONE TBONE 2154
++merlin MACH_MERLIN MERLIN 2155
++falcon MACH_FALCON FALCON 2156
+ davinci_da850_evm MACH_DAVINCI_DA850_EVM DAVINCI_DA850_EVM 2157
++s5p6440 MACH_S5P6440 S5P6440 2158
+ at91sam9g10ek MACH_AT91SAM9G10EK AT91SAM9G10EK 2159
+ omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160
++lpc313x MACH_LPC313X LPC313X 2161
+ magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162
++magx_em30 MACH_MAGX_EM30 MAGX_EM30 2163
++magx_ve66 MACH_MAGX_VE66 MAGX_VE66 2164
++meesc MACH_MEESC MEESC 2165
++otc570 MACH_OTC570 OTC570 2166
++bcu2412 MACH_BCU2412 BCU2412 2167
++beacon MACH_BEACON BEACON 2168
++actia_tgw MACH_ACTIA_TGW ACTIA_TGW 2169
++e4430 MACH_E4430 E4430 2170
++ql300 MACH_QL300 QL300 2171
+ btmavb101 MACH_BTMAVB101 BTMAVB101 2172
+ btmawb101 MACH_BTMAWB101 BTMAWB101 2173
++sq201 MACH_SQ201 SQ201 2174
++quatro45xx MACH_QUATRO45XX QUATRO45XX 2175
++openpad MACH_OPENPAD OPENPAD 2176
+ tx25 MACH_TX25 TX25 2177
+ omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178
++htcraphael_k MACH_HTCRAPHAEL_K HTCRAPHAEL_K 2179
++lal43 MACH_LAL43 LAL43 2181
++htcraphael_cdma500 MACH_HTCRAPHAEL_CDMA500 HTCRAPHAEL_CDMA500 2182
+ anw6410 MACH_ANW6410 ANW6410 2183
++htcprophet MACH_HTCPROPHET HTCPROPHET 2185
++cfa_10022 MACH_CFA_10022 CFA_10022 2186
+ imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187
++px2imx27 MACH_PX2IMX27 PX2IMX27 2188
++stm3210e_eval MACH_STM3210E_EVAL STM3210E_EVAL 2189
++dvs10 MACH_DVS10 DVS10 2190
+ portuxg20 MACH_PORTUXG20 PORTUXG20 2191
++arm_spv MACH_ARM_SPV ARM_SPV 2192
+ smdkc110 MACH_SMDKC110 SMDKC110 2193
++cabespresso MACH_CABESPRESSO CABESPRESSO 2194
++hmc800 MACH_HMC800 HMC800 2195
++sholes MACH_SHOLES SHOLES 2196
++btmxc31 MACH_BTMXC31 BTMXC31 2197
++dt501 MACH_DT501 DT501 2198
++ktx MACH_KTX KTX 2199
+ omap3517evm MACH_OMAP3517EVM OMAP3517EVM 2200
+ netspace_v2 MACH_NETSPACE_V2 NETSPACE_V2 2201
+ netspace_max_v2 MACH_NETSPACE_MAX_V2 NETSPACE_MAX_V2 2202
+ d2net_v2 MACH_D2NET_V2 D2NET_V2 2203
+ net2big_v2 MACH_NET2BIG_V2 NET2BIG_V2 2204
++net4big_v2 MACH_NET4BIG_V2 NET4BIG_V2 2205
+ net5big_v2 MACH_NET5BIG_V2 NET5BIG_V2 2206
++endb2443 MACH_ENDB2443 ENDB2443 2207
+ inetspace_v2 MACH_INETSPACE_V2 INETSPACE_V2 2208
++tros MACH_TROS TROS 2209
++pelco_homer MACH_PELCO_HOMER PELCO_HOMER 2210
++ofsp8 MACH_OFSP8 OFSP8 2211
+ at91sam9g45ekes MACH_AT91SAM9G45EKES AT91SAM9G45EKES 2212
++guf_cupid MACH_GUF_CUPID GUF_CUPID 2213
++eab1r MACH_EAB1R EAB1R 2214
++desirec MACH_DESIREC DESIREC 2215
++cordoba MACH_CORDOBA CORDOBA 2216
++irvine MACH_IRVINE IRVINE 2217
++sff772 MACH_SFF772 SFF772 2218
++pelco_milano MACH_PELCO_MILANO PELCO_MILANO 2219
+ pc7302 MACH_PC7302 PC7302 2220
++bip6000 MACH_BIP6000 BIP6000 2221
++silvermoon MACH_SILVERMOON SILVERMOON 2222
++vc0830 MACH_VC0830 VC0830 2223
++dt430 MACH_DT430 DT430 2224
++ji42pf MACH_JI42PF JI42PF 2225
++gnet_ksm MACH_GNET_KSM GNET_KSM 2226
++gnet_sgm MACH_GNET_SGM GNET_SGM 2227
++gnet_sgr MACH_GNET_SGR GNET_SGR 2228
++omap3_icetekevm MACH_OMAP3_ICETEKEVM OMAP3_ICETEKEVM 2229
++pnp MACH_PNP PNP 2230
++ctera_2bay_k MACH_CTERA_2BAY_K CTERA_2BAY_K 2231
++ctera_2bay_u MACH_CTERA_2BAY_U CTERA_2BAY_U 2232
++sas_c MACH_SAS_C SAS_C 2233
++vma2315 MACH_VMA2315 VMA2315 2234
++vcs MACH_VCS VCS 2235
+ spear600 MACH_SPEAR600 SPEAR600 2236
+ spear300 MACH_SPEAR300 SPEAR300 2237
++spear1300 MACH_SPEAR1300 SPEAR1300 2238
+ lilly1131 MACH_LILLY1131 LILLY1131 2239
++arvoo_ax301 MACH_ARVOO_AX301 ARVOO_AX301 2240
++mapphone MACH_MAPPHONE MAPPHONE 2241
++legend MACH_LEGEND LEGEND 2242
++salsa MACH_SALSA SALSA 2243
++lounge MACH_LOUNGE LOUNGE 2244
++vision MACH_VISION VISION 2245
++vmb20 MACH_VMB20 VMB20 2246
++hy2410 MACH_HY2410 HY2410 2247
++hy9315 MACH_HY9315 HY9315 2248
++bullwinkle MACH_BULLWINKLE BULLWINKLE 2249
++arm_ultimator2 MACH_ARM_ULTIMATOR2 ARM_ULTIMATOR2 2250
++vs_v210 MACH_VS_V210 VS_V210 2252
++vs_v212 MACH_VS_V212 VS_V212 2253
+ hmt MACH_HMT HMT 2254
++km_kirkwood MACH_KM_KIRKWOOD KM_KIRKWOOD 2255
++vesper MACH_VESPER VESPER 2256
++str9 MACH_STR9 STR9 2257
++omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258
++simcom MACH_SIMCOM SIMCOM 2259
++mcwebio MACH_MCWEBIO MCWEBIO 2260
++omap3_phrazer MACH_OMAP3_PHRAZER OMAP3_PHRAZER 2261
++darwin MACH_DARWIN DARWIN 2262
++oratiscomu MACH_ORATISCOMU ORATISCOMU 2263
++rtsbc20 MACH_RTSBC20 RTSBC20 2264
++sgh_i780 MACH_I780 I780 2265
++gemini324 MACH_GEMINI324 GEMINI324 2266
++oratislan MACH_ORATISLAN ORATISLAN 2267
++oratisalog MACH_ORATISALOG ORATISALOG 2268
++oratismadi MACH_ORATISMADI ORATISMADI 2269
++oratisot16 MACH_ORATISOT16 ORATISOT16 2270
++oratisdesk MACH_ORATISDESK ORATISDESK 2271
+ vexpress MACH_VEXPRESS VEXPRESS 2272
++sintexo MACH_SINTEXO SINTEXO 2273
++cm3389 MACH_CM3389 CM3389 2274
++omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275
++sgh_i900 MACH_SGH_I900 SGH_I900 2276
++bst100 MACH_BST100 BST100 2277
++passion MACH_PASSION PASSION 2278
++indesign_at91sam MACH_INDESIGN_AT91SAM INDESIGN_AT91SAM 2279
++c4_badger MACH_C4_BADGER C4_BADGER 2280
++c4_viper MACH_C4_VIPER C4_VIPER 2281
+ d2net MACH_D2NET D2NET 2282
+ bigdisk MACH_BIGDISK BIGDISK 2283
++notalvision MACH_NOTALVISION NOTALVISION 2284
++omap3_kboc MACH_OMAP3_KBOC OMAP3_KBOC 2285
++cyclone MACH_CYCLONE CYCLONE 2286
++ninja MACH_NINJA NINJA 2287
+ at91sam9g20ek_2mmc MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC 2288
+ bcmring MACH_BCMRING BCMRING 2289
++resol_dl2 MACH_RESOL_DL2 RESOL_DL2 2290
++ifosw MACH_IFOSW IFOSW 2291
++htcrhodium MACH_HTCRHODIUM HTCRHODIUM 2292
++htctopaz MACH_HTCTOPAZ HTCTOPAZ 2293
++matrix504 MACH_MATRIX504 MATRIX504 2294
++mrfsa MACH_MRFSA MRFSA 2295
++sc_p270 MACH_SC_P270 SC_P270 2296
++atlas5_evb MACH_ATLAS5_EVB ATLAS5_EVB 2297
++pelco_lobox MACH_PELCO_LOBOX PELCO_LOBOX 2298
++dilax_pcu200 MACH_DILAX_PCU200 DILAX_PCU200 2299
++leonardo MACH_LEONARDO LEONARDO 2300
++zoran_approach7 MACH_ZORAN_APPROACH7 ZORAN_APPROACH7 2301
+ dp6xx MACH_DP6XX DP6XX 2302
++bcm2153_vesper MACH_BCM2153_VESPER BCM2153_VESPER 2303
+ mahimahi MACH_MAHIMAHI MAHIMAHI 2304
++clickc MACH_CLICKC CLICKC 2305
++zb_gateway MACH_ZB_GATEWAY ZB_GATEWAY 2306
++tazcard MACH_TAZCARD TAZCARD 2307
++tazdev MACH_TAZDEV TAZDEV 2308
++annax_cb_arm MACH_ANNAX_CB_ARM ANNAX_CB_ARM 2309
++annax_dm3 MACH_ANNAX_DM3 ANNAX_DM3 2310
++cerebric MACH_CEREBRIC CEREBRIC 2311
++orca MACH_ORCA ORCA 2312
++pc9260 MACH_PC9260 PC9260 2313
++ems285a MACH_EMS285A EMS285A 2314
++gec2410 MACH_GEC2410 GEC2410 2315
++gec2440 MACH_GEC2440 GEC2440 2316
++mw903 MACH_ARCH_MW903 ARCH_MW903 2317
++mw2440 MACH_MW2440 MW2440 2318
++ecac2378 MACH_ECAC2378 ECAC2378 2319
++tazkiosk MACH_TAZKIOSK TAZKIOSK 2320
++whiterabbit_mch MACH_WHITERABBIT_MCH WHITERABBIT_MCH 2321
++sbox9263 MACH_SBOX9263 SBOX9263 2322
+ smdk6442 MACH_SMDK6442 SMDK6442 2324
+ openrd_base MACH_OPENRD_BASE OPENRD_BASE 2325
++incredible MACH_INCREDIBLE INCREDIBLE 2326
++incrediblec MACH_INCREDIBLEC INCREDIBLEC 2327
++heroct MACH_HEROCT HEROCT 2328
++mmnet1000 MACH_MMNET1000 MMNET1000 2329
+ devkit8000 MACH_DEVKIT8000 DEVKIT8000 2330
++devkit9000 MACH_DEVKIT9000 DEVKIT9000 2331
++mx31txtr MACH_MX31TXTR MX31TXTR 2332
++u380 MACH_U380 U380 2333
++oamp3_hualu MACH_HUALU_BOARD HUALU_BOARD 2334
++npcmx50 MACH_NPCMX50 NPCMX50 2335
+ mx51_efikamx MACH_MX51_EFIKAMX MX51_EFIKAMX 2336
++mx51_lange52 MACH_MX51_LANGE52 MX51_LANGE52 2337
++riom MACH_RIOM RIOM 2338
++comcas MACH_COMCAS COMCAS 2339
++wsi_mx27 MACH_WSI_MX27 WSI_MX27 2340
+ cm_t35 MACH_CM_T35 CM_T35 2341
+ net2big MACH_NET2BIG NET2BIG 2342
++motorola_a1600 MACH_MOTOROLA_A1600 MOTOROLA_A1600 2343
+ igep0020 MACH_IGEP0020 IGEP0020 2344
++igep0010 MACH_IGEP0010 IGEP0010 2345
++mv6281gtwge2 MACH_MV6281GTWGE2 MV6281GTWGE2 2346
++scat100 MACH_SCAT100 SCAT100 2347
++sanmina MACH_SANMINA SANMINA 2348
++momento MACH_MOMENTO MOMENTO 2349
++nuc9xx MACH_NUC9XX NUC9XX 2350
++nuc910evb MACH_NUC910EVB NUC910EVB 2351
++nuc920evb MACH_NUC920EVB NUC920EVB 2352
++nuc950evb MACH_NUC950EVB NUC950EVB 2353
++nuc945evb MACH_NUC945EVB NUC945EVB 2354
++nuc960evb MACH_NUC960EVB NUC960EVB 2355
+ nuc932evb MACH_NUC932EVB NUC932EVB 2356
++nuc900 MACH_NUC900 NUC900 2357
++sd1soc MACH_SD1SOC SD1SOC 2358
++ln2440bc MACH_LN2440BC LN2440BC 2359
++rsbc MACH_RSBC RSBC 2360
+ openrd_client MACH_OPENRD_CLIENT OPENRD_CLIENT 2361
++hpipaq11x MACH_HPIPAQ11X HPIPAQ11X 2362
++wayland MACH_WAYLAND WAYLAND 2363
++acnbsx102 MACH_ACNBSX102 ACNBSX102 2364
++hwat91 MACH_HWAT91 HWAT91 2365
++at91sam9263cs MACH_AT91SAM9263CS AT91SAM9263CS 2366
++csb732 MACH_CSB732 CSB732 2367
+ u8500 MACH_U8500 U8500 2368
++huqiu MACH_HUQIU HUQIU 2369
+ mx51_efikasb MACH_MX51_EFIKASB MX51_EFIKASB 2370
++pmt1g MACH_PMT1G PMT1G 2371
++htcelf MACH_HTCELF HTCELF 2372
++armadillo420 MACH_ARMADILLO420 ARMADILLO420 2373
++armadillo440 MACH_ARMADILLO440 ARMADILLO440 2374
++u_chip_dual_arm MACH_U_CHIP_DUAL_ARM U_CHIP_DUAL_ARM 2375
++csr_bdb3 MACH_CSR_BDB3 CSR_BDB3 2376
++dolby_cat1018 MACH_DOLBY_CAT1018 DOLBY_CAT1018 2377
++hy9307 MACH_HY9307 HY9307 2378
++aspire_easystore MACH_A_ES A_ES 2379
++davinci_irif MACH_DAVINCI_IRIF DAVINCI_IRIF 2380
++agama9263 MACH_AGAMA9263 AGAMA9263 2381
+ marvell_jasper MACH_MARVELL_JASPER MARVELL_JASPER 2382
+ flint MACH_FLINT FLINT 2383
+ tavorevb3 MACH_TAVOREVB3 TAVOREVB3 2384
++sch_m490 MACH_SCH_M490 SCH_M490 2386
++rbl01 MACH_RBL01 RBL01 2387
++omnifi MACH_OMNIFI OMNIFI 2388
++otavalo MACH_OTAVALO OTAVALO 2389
++htc_excalibur_s620 MACH_HTC_EXCALIBUR_S620 HTC_EXCALIBUR_S620 2391
++htc_opal MACH_HTC_OPAL HTC_OPAL 2392
+ touchbook MACH_TOUCHBOOK TOUCHBOOK 2393
++latte MACH_LATTE LATTE 2394
++xa200 MACH_XA200 XA200 2395
++nimrod MACH_NIMROD NIMROD 2396
++cc9p9215_3g MACH_CC9P9215_3G CC9P9215_3G 2397
++cc9p9215_3gjs MACH_CC9P9215_3GJS CC9P9215_3GJS 2398
++tk71 MACH_TK71 TK71 2399
++comham3525 MACH_COMHAM3525 COMHAM3525 2400
++mx31erebus MACH_MX31EREBUS MX31EREBUS 2401
++mcardmx27 MACH_MCARDMX27 MCARDMX27 2402
++paradise MACH_PARADISE PARADISE 2403
++tide MACH_TIDE TIDE 2404
++wzl2440 MACH_WZL2440 WZL2440 2405
++sdrdemo MACH_SDRDEMO SDRDEMO 2406
++ethercan2 MACH_ETHERCAN2 ETHERCAN2 2407
++ecmimg20 MACH_ECMIMG20 ECMIMG20 2408
++omap_dragon MACH_OMAP_DRAGON OMAP_DRAGON 2409
++halo MACH_HALO HALO 2410
++huangshan MACH_HUANGSHAN HUANGSHAN 2411
++vl_ma2sc MACH_VL_MA2SC VL_MA2SC 2412
+ raumfeld_rc MACH_RAUMFELD_RC RAUMFELD_RC 2413
+ raumfeld_connector MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR 2414
+ raumfeld_speaker MACH_RAUMFELD_SPEAKER RAUMFELD_SPEAKER 2415
++multibus_master MACH_MULTIBUS_MASTER MULTIBUS_MASTER 2416
++multibus_pbk MACH_MULTIBUS_PBK MULTIBUS_PBK 2417
+ tnetv107x MACH_TNETV107X TNETV107X 2418
++snake MACH_SNAKE SNAKE 2419
++cwmx27 MACH_CWMX27 CWMX27 2420
++sch_m480 MACH_SCH_M480 SCH_M480 2421
++platypus MACH_PLATYPUS PLATYPUS 2422
++pss2 MACH_PSS2 PSS2 2423
++davinci_apm150 MACH_DAVINCI_APM150 DAVINCI_APM150 2424
++str9100 MACH_STR9100 STR9100 2425
++net5big MACH_NET5BIG NET5BIG 2426
++seabed9263 MACH_SEABED9263 SEABED9263 2427
+ mx51_m2id MACH_MX51_M2ID MX51_M2ID 2428
++octvocplus_eb MACH_OCTVOCPLUS_EB OCTVOCPLUS_EB 2429
++klk_firefox MACH_KLK_FIREFOX KLK_FIREFOX 2430
++klk_wirma_module MACH_KLK_WIRMA_MODULE KLK_WIRMA_MODULE 2431
++klk_wirma_mmi MACH_KLK_WIRMA_MMI KLK_WIRMA_MMI 2432
++supersonic MACH_SUPERSONIC SUPERSONIC 2433
++liberty MACH_LIBERTY LIBERTY 2434
++mh355 MACH_MH355 MH355 2435
++pc7802 MACH_PC7802 PC7802 2436
++gnet_sgc MACH_GNET_SGC GNET_SGC 2437
++einstein15 MACH_EINSTEIN15 EINSTEIN15 2438
++cmpd MACH_CMPD CMPD 2439
++davinci_hase1 MACH_DAVINCI_HASE1 DAVINCI_HASE1 2440
++lgeincitephone MACH_LGEINCITEPHONE LGEINCITEPHONE 2441
++ea313x MACH_EA313X EA313X 2442
++fwbd_39064 MACH_FWBD_39064 FWBD_39064 2443
++fwbd_390128 MACH_FWBD_390128 FWBD_390128 2444
++pelco_moe MACH_PELCO_MOE PELCO_MOE 2445
++minimix27 MACH_MINIMIX27 MINIMIX27 2446
++omap3_thunder MACH_OMAP3_THUNDER OMAP3_THUNDER 2447
++passionc MACH_PASSIONC PASSIONC 2448
++mx27amata MACH_MX27AMATA MX27AMATA 2449
++bgat1 MACH_BGAT1 BGAT1 2450
++buzz MACH_BUZZ BUZZ 2451
++mb9g20 MACH_MB9G20 MB9G20 2452
++yushan MACH_YUSHAN YUSHAN 2453
++lizard MACH_LIZARD LIZARD 2454
++omap3polycom MACH_OMAP3POLYCOM OMAP3POLYCOM 2455
+ smdkv210 MACH_SMDKV210 SMDKV210 2456
++bravo MACH_BRAVO BRAVO 2457
++siogentoo1 MACH_SIOGENTOO1 SIOGENTOO1 2458
++siogentoo2 MACH_SIOGENTOO2 SIOGENTOO2 2459
++sm3k MACH_SM3K SM3K 2460
++acer_tempo_f900 MACH_ACER_TEMPO_F900 ACER_TEMPO_F900 2461
++glittertind MACH_GLITTERTIND GLITTERTIND 2463
+ omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464
+ omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465
++cybook2440 MACH_CYBOOK2440 CYBOOK2440 2466
++torino_s MACH_TORINO_S TORINO_S 2467
++havana MACH_HAVANA HAVANA 2468
++beaumont_11 MACH_BEAUMONT_11 BEAUMONT_11 2469
++vanguard MACH_VANGUARD VANGUARD 2470
++s5pc110_draco MACH_S5PC110_DRACO S5PC110_DRACO 2471
++cartesio_two MACH_CARTESIO_TWO CARTESIO_TWO 2472
++aster MACH_ASTER ASTER 2473
++voguesv210 MACH_VOGUESV210 VOGUESV210 2474
++acm500x MACH_ACM500X ACM500X 2475
++km9260 MACH_KM9260 KM9260 2476
++nideflexg1 MACH_NIDEFLEXG1 NIDEFLEXG1 2477
++ctera_plug_io MACH_CTERA_PLUG_IO CTERA_PLUG_IO 2478
+ smartq7 MACH_SMARTQ7 SMARTQ7 2479
++at91sam9g10ek2 MACH_AT91SAM9G10EK2 AT91SAM9G10EK2 2480
++asusp527 MACH_ASUSP527 ASUSP527 2481
++at91sam9g20mpm2 MACH_AT91SAM9G20MPM2 AT91SAM9G20MPM2 2482
++topasa900 MACH_TOPASA900 TOPASA900 2483
++electrum_100 MACH_ELECTRUM_100 ELECTRUM_100 2484
++mx51grb MACH_MX51GRB MX51GRB 2485
++xea300 MACH_XEA300 XEA300 2486
++htcstartrek MACH_HTCSTARTREK HTCSTARTREK 2487
++lima MACH_LIMA LIMA 2488
++csb740 MACH_CSB740 CSB740 2489
++usb_s8815 MACH_USB_S8815 USB_S8815 2490
+ watson_efm_plugin MACH_WATSON_EFM_PLUGIN WATSON_EFM_PLUGIN 2491
++milkyway MACH_MILKYWAY MILKYWAY 2492
+ g4evm MACH_G4EVM G4EVM 2493
++picomod6 MACH_PICOMOD6 PICOMOD6 2494
+ omapl138_hawkboard MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD 2495
++ip6000 MACH_IP6000 IP6000 2496
++ip6010 MACH_IP6010 IP6010 2497
++utm400 MACH_UTM400 UTM400 2498
++omap3_zybex MACH_OMAP3_ZYBEX OMAP3_ZYBEX 2499
++wireless_space MACH_WIRELESS_SPACE WIRELESS_SPACE 2500
++sx560 MACH_SX560 SX560 2501
+ ts41x MACH_TS41X TS41X 2502
++elphel10373 MACH_ELPHEL10373 ELPHEL10373 2503
++rhobot MACH_RHOBOT RHOBOT 2504
++mx51_refresh MACH_MX51_REFRESH MX51_REFRESH 2505
++ls9260 MACH_LS9260 LS9260 2506
++shank MACH_SHANK SHANK 2507
++qsd8x50_st1 MACH_QSD8X50_ST1 QSD8X50_ST1 2508
++at91sam9m10ekes MACH_AT91SAM9M10EKES AT91SAM9M10EKES 2509
++hiram MACH_HIRAM HIRAM 2510
+ phy3250 MACH_PHY3250 PHY3250 2511
++ea3250 MACH_EA3250 EA3250 2512
++fdi3250 MACH_FDI3250 FDI3250 2513
++at91sam9263nit MACH_AT91SAM9263NIT AT91SAM9263NIT 2515
++ccmx51 MACH_CCMX51 CCMX51 2516
++ccmx51js MACH_CCMX51JS CCMX51JS 2517
++ccwmx51 MACH_CCWMX51 CCWMX51 2518
++ccwmx51js MACH_CCWMX51JS CCWMX51JS 2519
+ mini6410 MACH_MINI6410 MINI6410 2520
++tiny6410 MACH_TINY6410 TINY6410 2521
++nano6410 MACH_NANO6410 NANO6410 2522
++at572d940hfnldb MACH_AT572D940HFNLDB AT572D940HFNLDB 2523
++htcleo MACH_HTCLEO HTCLEO 2524
++avp13 MACH_AVP13 AVP13 2525
++xxsvideod MACH_XXSVIDEOD XXSVIDEOD 2526
++vpnext MACH_VPNEXT VPNEXT 2527
++swarco_itc3 MACH_SWARCO_ITC3 SWARCO_ITC3 2528
+ tx51 MACH_TX51 TX51 2529
++dolby_cat1021 MACH_DOLBY_CAT1021 DOLBY_CAT1021 2530
+ mx28evk MACH_MX28EVK MX28EVK 2531
++phoenix260 MACH_PHOENIX260 PHOENIX260 2532
++uvaca_stork MACH_UVACA_STORK UVACA_STORK 2533
+ smartq5 MACH_SMARTQ5 SMARTQ5 2534
++all3078 MACH_ALL3078 ALL3078 2535
++ctera_2bay_ds MACH_CTERA_2BAY_DS CTERA_2BAY_DS 2536
++siogentoo3 MACH_SIOGENTOO3 SIOGENTOO3 2537
++epb5000 MACH_EPB5000 EPB5000 2538
++hy9263 MACH_HY9263 HY9263 2539
++acer_tempo_m900 MACH_ACER_TEMPO_M900 ACER_TEMPO_M900 2540
++acer_tempo_dx650 MACH_ACER_TEMPO_DX900 ACER_TEMPO_DX900 2541
++acer_tempo_x960 MACH_ACER_TEMPO_X960 ACER_TEMPO_X960 2542
++acer_eten_v900 MACH_ACER_ETEN_V900 ACER_ETEN_V900 2543
++acer_eten_x900 MACH_ACER_ETEN_X900 ACER_ETEN_X900 2544
++bonnell MACH_BONNELL BONNELL 2545
++oht_mx27 MACH_OHT_MX27 OHT_MX27 2546
++htcquartz MACH_HTCQUARTZ HTCQUARTZ 2547
+ davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548
++c3ax03 MACH_C3AX03 C3AX03 2549
+ mxt_td60 MACH_MXT_TD60 MXT_TD60 2550
++esyx MACH_ESYX ESYX 2551
++dove_db2 MACH_DOVE_DB2 DOVE_DB2 2552
++bulldog MACH_BULLDOG BULLDOG 2553
++derell_me2000 MACH_DERELL_ME2000 DERELL_ME2000 2554
++bcmring_base MACH_BCMRING_BASE BCMRING_BASE 2555
++bcmring_evm MACH_BCMRING_EVM BCMRING_EVM 2556
++bcmring_evm_jazz MACH_BCMRING_EVM_JAZZ BCMRING_EVM_JAZZ 2557
++bcmring_sp MACH_BCMRING_SP BCMRING_SP 2558
++bcmring_sv MACH_BCMRING_SV BCMRING_SV 2559
++bcmring_sv_jazz MACH_BCMRING_SV_JAZZ BCMRING_SV_JAZZ 2560
++bcmring_tablet MACH_BCMRING_TABLET BCMRING_TABLET 2561
++bcmring_vp MACH_BCMRING_VP BCMRING_VP 2562
++bcmring_evm_seikor MACH_BCMRING_EVM_SEIKOR BCMRING_EVM_SEIKOR 2563
++bcmring_sp_wqvga MACH_BCMRING_SP_WQVGA BCMRING_SP_WQVGA 2564
++bcmring_custom MACH_BCMRING_CUSTOM BCMRING_CUSTOM 2565
++acer_s200 MACH_ACER_S200 ACER_S200 2566
++bt270 MACH_BT270 BT270 2567
++iseo MACH_ISEO ISEO 2568
++cezanne MACH_CEZANNE CEZANNE 2569
++lucca MACH_LUCCA LUCCA 2570
++supersmart MACH_SUPERSMART SUPERSMART 2571
++arm11_board MACH_CS_MISANO CS_MISANO 2572
++magnolia2 MACH_MAGNOLIA2 MAGNOLIA2 2573
++emxx MACH_EMXX EMXX 2574
++outlaw MACH_OUTLAW OUTLAW 2575
+ riot_bei2 MACH_RIOT_BEI2 RIOT_BEI2 2576
++riot_gx2 MACH_RIOT_VOX RIOT_VOX 2577
+ riot_x37 MACH_RIOT_X37 RIOT_X37 2578
++mega25mx MACH_MEGA25MX MEGA25MX 2579
++benzina2 MACH_BENZINA2 BENZINA2 2580
++ignite MACH_IGNITE IGNITE 2581
++foggia MACH_FOGGIA FOGGIA 2582
++arezzo MACH_AREZZO AREZZO 2583
++leica_skywalker MACH_LEICA_SKYWALKER LEICA_SKYWALKER 2584
++jacinto2_jamr MACH_JACINTO2_JAMR JACINTO2_JAMR 2585
++gts_nova MACH_GTS_NOVA GTS_NOVA 2586
++p3600 MACH_P3600 P3600 2587
++dlt2 MACH_DLT2 DLT2 2588
++df3120 MACH_DF3120 DF3120 2589
++ecucore_9g20 MACH_ECUCORE_9G20 ECUCORE_9G20 2590
++nautel_am35xx MACH_NAUTEL_LPC3240 NAUTEL_LPC3240 2591
++glacier MACH_GLACIER GLACIER 2592
++phrazer_bulldog MACH_PHRAZER_BULLDOG PHRAZER_BULLDOG 2593
++omap3_bulldog MACH_OMAP3_BULLDOG OMAP3_BULLDOG 2594
+ pca101 MACH_PCA101 PCA101 2595
++buzzc MACH_BUZZC BUZZC 2596
++sasie2 MACH_SASIE2 SASIE2 2597
++smartmeter_dl MACH_SMARTMETER_DL SMARTMETER_DL 2599
++wzl6410 MACH_WZL6410 WZL6410 2600
++wzl6410m MACH_WZL6410M WZL6410M 2601
++wzl6410f MACH_WZL6410F WZL6410F 2602
++wzl6410i MACH_WZL6410I WZL6410I 2603
++spacecom1 MACH_SPACECOM1 SPACECOM1 2604
++pingu920 MACH_PINGU920 PINGU920 2605
++bravoc MACH_BRAVOC BRAVOC 2606
++vdssw MACH_VDSSW VDSSW 2608
++romulus MACH_ROMULUS ROMULUS 2609
++omap_magic MACH_OMAP_MAGIC OMAP_MAGIC 2610
++eltd100 MACH_ELTD100 ELTD100 2611
+ capc7117 MACH_CAPC7117 CAPC7117 2612
++swan MACH_SWAN SWAN 2613
++veu MACH_VEU VEU 2614
++rm2 MACH_RM2 RM2 2615
++tt2100 MACH_TT2100 TT2100 2616
++venice MACH_VENICE VENICE 2617
++pc7323 MACH_PC7323 PC7323 2618
++masp MACH_MASP MASP 2619
++fujitsu_tvstbsoc0 MACH_FUJITSU_TVSTBSOC FUJITSU_TVSTBSOC 2620
++fujitsu_tvstbsoc1 MACH_FUJITSU_TVSTBSOC1 FUJITSU_TVSTBSOC1 2621
++lexikon MACH_LEXIKON LEXIKON 2622
++mini2440v2 MACH_MINI2440V2 MINI2440V2 2623
+ icontrol MACH_ICONTROL ICONTROL 2624
+-gplugd MACH_GPLUGD GPLUGD 2625
++gplugd MACH_SHEEVAD SHEEVAD 2625
++qsd8x50a_st1_1 MACH_QSD8X50A_ST1_1 QSD8X50A_ST1_1 2626
+ qsd8x50a_st1_5 MACH_QSD8X50A_ST1_5 QSD8X50A_ST1_5 2627
++bee MACH_BEE BEE 2628
+ mx23evk MACH_MX23EVK MX23EVK 2629
+ ap4evb MACH_AP4EVB AP4EVB 2630
++stockholm MACH_STOCKHOLM STOCKHOLM 2631
++lpc_h3131 MACH_LPC_H3131 LPC_H3131 2632
++stingray MACH_STINGRAY STINGRAY 2633
++kraken MACH_KRAKEN KRAKEN 2634
++gw2388 MACH_GW2388 GW2388 2635
++jadecpu MACH_JADECPU JADECPU 2636
++carlisle MACH_CARLISLE CARLISLE 2637
++lux_sf9 MACH_LUX_SF9 LUX_SF9 2638
++nemid_tb MACH_NEMID_TB NEMID_TB 2639
++terrier MACH_TERRIER TERRIER 2640
++turbot MACH_TURBOT TURBOT 2641
++sanddab MACH_SANDDAB SANDDAB 2642
++mx35_cicada MACH_MX35_CICADA MX35_CICADA 2643
++ghi2703d MACH_GHI2703D GHI2703D 2644
++lux_sfx9 MACH_LUX_SFX9 LUX_SFX9 2645
++lux_sf9g MACH_LUX_SF9G LUX_SF9G 2646
++lux_edk9 MACH_LUX_EDK9 LUX_EDK9 2647
++hw90240 MACH_HW90240 HW90240 2648
++dm365_leopard MACH_DM365_LEOPARD DM365_LEOPARD 2649
+ mityomapl138 MACH_MITYOMAPL138 MITYOMAPL138 2650
++scat110 MACH_SCAT110 SCAT110 2651
++acer_a1 MACH_ACER_A1 ACER_A1 2652
++cmcontrol MACH_CMCONTROL CMCONTROL 2653
++pelco_lamar MACH_PELCO_LAMAR PELCO_LAMAR 2654
++rfp43 MACH_RFP43 RFP43 2655
++sk86r0301 MACH_SK86R0301 SK86R0301 2656
++ctpxa MACH_CTPXA CTPXA 2657
++epb_arm9_a MACH_EPB_ARM9_A EPB_ARM9_A 2658
+ guruplug MACH_GURUPLUG GURUPLUG 2659
+ spear310 MACH_SPEAR310 SPEAR310 2660
+ spear320 MACH_SPEAR320 SPEAR320 2661
++robotx MACH_ROBOTX ROBOTX 2662
++lsxhl MACH_LSXHL LSXHL 2663
++smartlite MACH_SMARTLITE SMARTLITE 2664
++cws2 MACH_CWS2 CWS2 2665
++m619 MACH_M619 M619 2666
++smartview MACH_SMARTVIEW SMARTVIEW 2667
++lsa_salsa MACH_LSA_SALSA LSA_SALSA 2668
++kizbox MACH_KIZBOX KIZBOX 2669
++htccharmer MACH_HTCCHARMER HTCCHARMER 2670
++guf_neso_lt MACH_GUF_NESO_LT GUF_NESO_LT 2671
++pm9g45 MACH_PM9G45 PM9G45 2672
++htcpanther MACH_HTCPANTHER HTCPANTHER 2673
++htcpanther_cdma MACH_HTCPANTHER_CDMA HTCPANTHER_CDMA 2674
++reb01 MACH_REB01 REB01 2675
+ aquila MACH_AQUILA AQUILA 2676
++spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677
+ esata_sheevaplug MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678
+ msm7x30_surf MACH_MSM7X30_SURF MSM7X30_SURF 2679
++micro2440 MACH_MICRO2440 MICRO2440 2680
++am2440 MACH_AM2440 AM2440 2681
++tq2440 MACH_TQ2440 TQ2440 2682
+ ea2478devkit MACH_EA2478DEVKIT EA2478DEVKIT 2683
++ak880x MACH_AK880X AK880X 2684
++cobra3530 MACH_COBRA3530 COBRA3530 2685
++pmppb MACH_PMPPB PMPPB 2686
++u6715 MACH_U6715 U6715 2687
++axar1500_sender MACH_AXAR1500_SENDER AXAR1500_SENDER 2688
++g30_dvb MACH_G30_DVB G30_DVB 2689
++vc088x MACH_VC088X VC088X 2690
++mioa702 MACH_MIOA702 MIOA702 2691
++hpmin MACH_HPMIN HPMIN 2692
++ak880xak MACH_AK880XAK AK880XAK 2693
++arm926tomap850 MACH_ARM926TOMAP850 ARM926TOMAP850 2694
++lkevm MACH_LKEVM LKEVM 2695
++mw6410 MACH_MW6410 MW6410 2696
+ terastation_wxl MACH_TERASTATION_WXL TERASTATION_WXL 2697
++cpu8000e MACH_CPU8000E CPU8000E 2698
++tokyo MACH_TOKYO TOKYO 2700
++msm7201a_surf MACH_MSM7201A_SURF MSM7201A_SURF 2701
++msm7201a_ffa MACH_MSM7201A_FFA MSM7201A_FFA 2702
+ msm7x25_surf MACH_MSM7X25_SURF MSM7X25_SURF 2703
+ msm7x25_ffa MACH_MSM7X25_FFA MSM7X25_FFA 2704
+ msm7x27_surf MACH_MSM7X27_SURF MSM7X27_SURF 2705
+ msm7x27_ffa MACH_MSM7X27_FFA MSM7X27_FFA 2706
+ msm7x30_ffa MACH_MSM7X30_FFA MSM7X30_FFA 2707
+ qsd8x50_surf MACH_QSD8X50_SURF QSD8X50_SURF 2708
++qsd8x50_comet MACH_QSD8X50_COMET QSD8X50_COMET 2709
++qsd8x50_ffa MACH_QSD8X50_FFA QSD8X50_FFA 2710
++qsd8x50a_surf MACH_QSD8X50A_SURF QSD8X50A_SURF 2711
++qsd8x50a_ffa MACH_QSD8X50A_FFA QSD8X50A_FFA 2712
++adx_xgcp10 MACH_ADX_XGCP10 ADX_XGCP10 2713
++mcgwumts2a MACH_MCGWUMTS2A MCGWUMTS2A 2714
++mobikt MACH_MOBIKT MOBIKT 2715
+ mx53_evk MACH_MX53_EVK MX53_EVK 2716
+ igep0030 MACH_IGEP0030 IGEP0030 2717
++axell_h40_h50_ctrl MACH_AXELL_H40_H50_CTRL AXELL_H40_H50_CTRL 2718
++dtcommod MACH_DTCOMMOD DTCOMMOD 2719
++gould MACH_GOULD GOULD 2720
++siberia MACH_SIBERIA SIBERIA 2721
+ sbc3530 MACH_SBC3530 SBC3530 2722
++qarm MACH_QARM QARM 2723
++mips MACH_MIPS MIPS 2724
++mx27grb MACH_MX27GRB MX27GRB 2725
++sbc8100 MACH_SBC8100 SBC8100 2726
+ saarb MACH_SAARB SAARB 2727
++omap3mini MACH_OMAP3MINI OMAP3MINI 2728
++cnmbook7se MACH_CNMBOOK7SE CNMBOOK7SE 2729
++catan MACH_CATAN CATAN 2730
+ harmony MACH_HARMONY HARMONY 2731
++tonga MACH_TONGA TONGA 2732
++cybook_orizon MACH_CYBOOK_ORIZON CYBOOK_ORIZON 2733
++htcrhodiumcdma MACH_HTCRHODIUMCDMA HTCRHODIUMCDMA 2734
++epc_g45 MACH_EPC_G45 EPC_G45 2735
++epc_lpc3250 MACH_EPC_LPC3250 EPC_LPC3250 2736
++mxc91341evb MACH_MXC91341EVB MXC91341EVB 2737
++rtw1000 MACH_RTW1000 RTW1000 2738
++bobcat MACH_BOBCAT BOBCAT 2739
++trizeps6 MACH_TRIZEPS6 TRIZEPS6 2740
+ msm7x30_fluid MACH_MSM7X30_FLUID MSM7X30_FLUID 2741
++nedap9263 MACH_NEDAP9263 NEDAP9263 2742
++netgear_ms2110 MACH_NETGEAR_MS2110 NETGEAR_MS2110 2743
++bmx MACH_BMX BMX 2744
++netstream MACH_NETSTREAM NETSTREAM 2745
++vpnext_rcu MACH_VPNEXT_RCU VPNEXT_RCU 2746
++vpnext_mpu MACH_VPNEXT_MPU VPNEXT_MPU 2747
++bcmring_tablet_v1 MACH_BCMRING_TABLET_V1 BCMRING_TABLET_V1 2748
++sgarm10 MACH_SGARM10 SGARM10 2749
+ cm_t3517 MACH_CM_T3517 CM_T3517 2750
++dig297 MACH_OMAP3_CPS OMAP3_CPS 2751
++axar1500_receiver MACH_AXAR1500_RECEIVER AXAR1500_RECEIVER 2752
+ wbd222 MACH_WBD222 WBD222 2753
++mt65xx MACH_MT65XX MT65XX 2754
+ msm8x60_surf MACH_MSM8X60_SURF MSM8X60_SURF 2755
+ msm8x60_sim MACH_MSM8X60_SIM MSM8X60_SIM 2756
+ tcc8000_sdk MACH_TCC8000_SDK TCC8000_SDK 2758
+ nanos MACH_NANOS NANOS 2759
++stamp9g10 MACH_STAMP9G10 STAMP9G10 2760
+ stamp9g45 MACH_STAMP9G45 STAMP9G45 2761
++h6053 MACH_H6053 H6053 2762
++smint01 MACH_SMINT01 SMINT01 2763
++prtlvt2 MACH_PRTLVT2 PRTLVT2 2764
++ap420 MACH_AP420 AP420 2765
++davinci_dm365_fc MACH_DAVINCI_DM365_FC DAVINCI_DM365_FC 2767
++msm8x55_surf MACH_MSM8X55_SURF MSM8X55_SURF 2768
++msm8x55_ffa MACH_MSM8X55_FFA MSM8X55_FFA 2769
++esl_vamana MACH_ESL_VAMANA ESL_VAMANA 2770
++sbc35 MACH_SBC35 SBC35 2771
++mpx6446 MACH_MPX6446 MPX6446 2772
++oreo_controller MACH_OREO_CONTROLLER OREO_CONTROLLER 2773
++kopin_models MACH_KOPIN_MODELS KOPIN_MODELS 2774
++ttc_vision2 MACH_TTC_VISION2 TTC_VISION2 2775
+ cns3420vb MACH_CNS3420VB CNS3420VB 2776
++lpc_evo MACH_LPC2 LPC2 2777
++olympus MACH_OLYMPUS OLYMPUS 2778
++vortex MACH_VORTEX VORTEX 2779
++s5pc200 MACH_S5PC200 S5PC200 2780
++ecucore_9263 MACH_ECUCORE_9263 ECUCORE_9263 2781
++smdkc200 MACH_SMDKC200 SMDKC200 2782
++emsiso_sx27 MACH_EMSISO_SX27 EMSISO_SX27 2783
++apx_som9g45_ek MACH_APX_SOM9G45_EK APX_SOM9G45_EK 2784
++songshan MACH_SONGSHAN SONGSHAN 2785
++tianshan MACH_TIANSHAN TIANSHAN 2786
++vpx500 MACH_VPX500 VPX500 2787
++am3517sam MACH_AM3517SAM AM3517SAM 2788
++skat91_sim508 MACH_SKAT91_SIM508 SKAT91_SIM508 2789
++skat91_s3e MACH_SKAT91_S3E SKAT91_S3E 2790
+ omap4_panda MACH_OMAP4_PANDA OMAP4_PANDA 2791
++df7220 MACH_DF7220 DF7220 2792
++nemini MACH_NEMINI NEMINI 2793
++t8200 MACH_T8200 T8200 2794
++apf51 MACH_APF51 APF51 2795
++dr_rc_unit MACH_DR_RC_UNIT DR_RC_UNIT 2796
++bordeaux MACH_BORDEAUX BORDEAUX 2797
++catania_b MACH_CATANIA_B CATANIA_B 2798
++mx51_ocean MACH_MX51_OCEAN MX51_OCEAN 2799
+ ti8168evm MACH_TI8168EVM TI8168EVM 2800
++neocoreomap MACH_NEOCOREOMAP NEOCOREOMAP 2801
++withings_wbp MACH_WITHINGS_WBP WITHINGS_WBP 2802
++dbps MACH_DBPS DBPS 2803
++pcbfp0001 MACH_PCBFP0001 PCBFP0001 2805
++speedy MACH_SPEEDY SPEEDY 2806
++chrysaor MACH_CHRYSAOR CHRYSAOR 2807
++tango MACH_TANGO TANGO 2808
++synology_dsx11 MACH_SYNOLOGY_DSX11 SYNOLOGY_DSX11 2809
++hanlin_v3ext MACH_HANLIN_V3EXT HANLIN_V3EXT 2810
++hanlin_v5 MACH_HANLIN_V5 HANLIN_V5 2811
++hanlin_v3plus MACH_HANLIN_V3PLUS HANLIN_V3PLUS 2812
++iriver_story MACH_IRIVER_STORY IRIVER_STORY 2813
++irex_iliad MACH_IREX_ILIAD IREX_ILIAD 2814
++irex_dr1000 MACH_IREX_DR1000 IREX_DR1000 2815
+ teton_bga MACH_TETON_BGA TETON_BGA 2816
++snapper9g45 MACH_SNAPPER9G45 SNAPPER9G45 2817
++tam3517 MACH_TAM3517 TAM3517 2818
++pdc100 MACH_PDC100 PDC100 2819
+ eukrea_cpuimx25sd MACH_EUKREA_CPUIMX25SD EUKREA_CPUIMX25SD 2820
+ eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35SD EUKREA_CPUIMX35SD 2821
+ eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822
+ eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823
++p565 MACH_P565 P565 2824
++acer_a4 MACH_ACER_A4 ACER_A4 2825
++davinci_dm368_bip MACH_DAVINCI_DM368_BIP DAVINCI_DM368_BIP 2826
++eshare MACH_ESHARE ESHARE 2827
++wlbargn MACH_WLBARGN WLBARGN 2829
++bm170 MACH_BM170 BM170 2830
++netspace_mini_v2 MACH_NETSPACE_MINI_V2 NETSPACE_MINI_V2 2831
++netspace_plug_v2 MACH_NETSPACE_PLUG_V2 NETSPACE_PLUG_V2 2832
++siemens_l1 MACH_SIEMENS_L1 SIEMENS_L1 2833
++elv_lcu1 MACH_ELV_LCU1 ELV_LCU1 2834
++mcu1 MACH_MCU1 MCU1 2835
++omap3_tao3530 MACH_OMAP3_TAO3530 OMAP3_TAO3530 2836
++omap3_pcutouch MACH_OMAP3_PCUTOUCH OMAP3_PCUTOUCH 2837
+ smdkc210 MACH_SMDKC210 SMDKC210 2838
+ pca102 MACH_PCA102 PCA102 2843
+ t5325 MACH_T5325 T5325 2846
+@@ -523,6 +2842,7 @@ msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X6
+ msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017
+ cm_a510 MACH_CM_A510 CM_A510 3020
+ tx28 MACH_TX28 TX28 3043
++ep3505 MACH_EP3517 EP3517 3056
+ pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062
+ vpr200 MACH_VPR200 VPR200 3087
+ torbreck MACH_TORBRECK TORBRECK 3090
+@@ -603,7 +2923,9 @@ isc3 MACH_ISC3 ISC3 3291
+ rascal MACH_RASCAL RASCAL 3292
+ hrefv60 MACH_HREFV60 HREFV60 3293
+ tpt_2_0 MACH_TPT_2_0 TPT_2_0 3294
++pydtd MACH_PYRAMID_TD PYRAMID_TD 3295
+ splendor MACH_SPLENDOR SPLENDOR 3296
++guf_vincell MACH_GUF_PLANET GUF_PLANET 3297
+ msm8x60_qt MACH_MSM8X60_QT MSM8X60_QT 3298
+ htc_hd_mini MACH_HTC_HD_MINI HTC_HD_MINI 3299
+ athene MACH_ATHENE ATHENE 3300
+@@ -614,6 +2936,7 @@ rfl109145_ssrv MACH_RFL109145_SSRV RFL1
+ nmh MACH_NMH NMH 3305
+ wn802t MACH_WN802T WN802T 3306
+ dragonet MACH_DRAGONET DRAGONET 3307
++geneva_b4 MACH_GENEVA_B GENEVA_B 3308
+ at91sam9263desk16l MACH_AT91SAM9263DESK16L AT91SAM9263DESK16L 3309
+ bcmhana_sv MACH_BCMHANA_SV BCMHANA_SV 3310
+ bcmhana_tablet MACH_BCMHANA_TABLET BCMHANA_TABLET 3311
+@@ -691,11 +3014,14 @@ viprinet MACH_VIPRINET VIPRINET 3385
+ bockw MACH_BOCKW BOCKW 3386
+ eva2000 MACH_EVA2000 EVA2000 3387
+ steelyard MACH_STEELYARD STEELYARD 3388
++ea2468devkit MACH_LPC2468OEM LPC2468OEM 3389
++fe2478mblox MACH_LPC2478MICROBLOX LPC2478MICROBLOX 3391
+ nsslsboard MACH_NSSLSBOARD NSSLSBOARD 3392
+ geneva_b5 MACH_GENEVA_B5 GENEVA_B5 3393
+ spear1340 MACH_SPEAR1340 SPEAR1340 3394
+ rexmas MACH_REXMAS REXMAS 3395
+ msm8960_cdp MACH_MSM8960_CDP MSM8960_CDP 3396
++msm8960_mtp MACH_MSM8960_MDP MSM8960_MDP 3397
+ msm8960_fluid MACH_MSM8960_FLUID MSM8960_FLUID 3398
+ msm8960_apq MACH_MSM8960_APQ MSM8960_APQ 3399
+ helios_v2 MACH_HELIOS_V2 HELIOS_V2 3400
+@@ -727,6 +3053,7 @@ gt_i5700 MACH_GT_I5700 GT_I5700 3425
+ ctera_plug_c2 MACH_CTERA_PLUG_C2 CTERA_PLUG_C2 3426
+ marvelct MACH_MARVELCT MARVELCT 3427
+ ag11005 MACH_AG11005 AG11005 3428
++omap_tabletblaze MACH_OMAP_BLAZE OMAP_BLAZE 3429
+ vangogh MACH_VANGOGH VANGOGH 3430
+ matrix505 MACH_MATRIX505 MATRIX505 3431
+ oce_nigma MACH_OCE_NIGMA OCE_NIGMA 3432
+@@ -766,6 +3093,7 @@ h1600 MACH_H1600 H1600 3465
+ mini210 MACH_MINI210 MINI210 3466
+ mini8168 MACH_MINI8168 MINI8168 3467
+ pc7308 MACH_PC7308 PC7308 3468
++ge863pro3 MACH_GE863 GE863 3469
+ kmm2m01 MACH_KMM2M01 KMM2M01 3470
+ mx51erebus MACH_MX51EREBUS MX51EREBUS 3471
+ wm8650refboard MACH_WM8650REFBOARD WM8650REFBOARD 3472
diff --git a/target/linux/generic/patches-3.3/992-mpcore_wdt_fix_watchdog_counter_loading.patch b/target/linux/generic/patches-3.3/992-mpcore_wdt_fix_watchdog_counter_loading.patch
new file mode 100644
index 0000000..fb16e2a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/992-mpcore_wdt_fix_watchdog_counter_loading.patch
@@ -0,0 +1,64 @@
+Although the commit "98af057092f8f0dabe63c5df08adc2bbfbddb1d2
+ ARM: 6126/1: ARM mpcore_wdt: fix build failure and other fixes"
+resolved long-standing mpcore_wdt driver build problems, it
+introduced an error in the relationship between the MPcore watchdog
+timer clock rate and mpcore_margin, "MPcore timer margin in seconds",
+such that watchdog timeouts are now arbitrary rather than the number
+of seconds specified by mpcore_margin.
+
+This change restores mpcore_wdt_keepalive() to its equivalent
+implementation prior to commit 98af057 such that watchdog timeouts now
+occur as specified by mpcore_margin.
+
+The variable 'mpcore_timer_rate', which caused that build failure, was
+replaced by 'twd_timer_rate'. Add an exported function so that the
+mpcore_wdt driver can obtain the 'twd_timer_rate' value.
+
+MPCORE_WATCHDOG, needed to build 'mpcore_wdt', already depends on
+HAVE_ARM_TWD, needed to build 'smp_twd', so from the point of view of
+the 'mpcore_wdt' driver the exported function will always exist.
+
+Signed-off-by: Valentine Barshak <vbarshak@mvista.com>
+Signed-off-by: Vitaly Kuzmichev <vkuzmichev@mvista.com>
+---
+
+ arch/arm/include/asm/smp_twd.h | 1 +
+ arch/arm/kernel/smp_twd.c | 7 +++++++
+ drivers/watchdog/mpcore_wdt.c | 4 +---
+ 3 files changed, 9 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/include/asm/smp_twd.h
++++ b/arch/arm/include/asm/smp_twd.h
+@@ -24,5 +24,6 @@ extern void __iomem *twd_base;
+
+ void twd_timer_setup(struct clock_event_device *);
+ void twd_timer_stop(struct clock_event_device *);
++unsigned long twd_timer_get_rate(void);
+
+ #endif
+--- a/arch/arm/kernel/smp_twd.c
++++ b/arch/arm/kernel/smp_twd.c
+@@ -268,3 +268,10 @@ void __cpuinit twd_timer_setup(struct cl
+ 0xf, 0xffffffff);
+ enable_percpu_irq(clk->irq, 0);
+ }
++
++/* Needed by mpcore_wdt */
++unsigned long twd_timer_get_rate(void)
++{
++ return twd_timer_rate;
++}
++EXPORT_SYMBOL_GPL(twd_timer_get_rate);
+--- a/drivers/watchdog/mpcore_wdt.c
++++ b/drivers/watchdog/mpcore_wdt.c
+@@ -99,9 +99,7 @@ static void mpcore_wdt_keepalive(struct
+
+ spin_lock(&wdt_lock);
+ /* Assume prescale is set to 256 */
+- count = __raw_readl(wdt->base + TWD_WDOG_COUNTER);
+- count = (0xFFFFFFFFU - count) * (HZ / 5);
+- count = (count / 256) * mpcore_margin;
++ count = (twd_timer_get_rate() / 256) * mpcore_margin;
+
+ /* Reload the counter */
+ writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
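For readers following the change, the timeout arithmetic this patch restores can be checked in isolation: with the TWD clocked at twd_timer_rate and the prescaler fixed at 256, the value loaded into TWD_WDOG_LOAD for a margin of mpcore_margin seconds is (twd_timer_rate / 256) * mpcore_margin. The sketch below only illustrates that formula; the clock rate and margin are assumed example values, not figures taken from the patch or from any particular board.

#include <stdio.h>

/* Standalone illustration of the keepalive formula restored above.
 * Both values are assumptions picked for the example: a 400 MHz TWD
 * clock and a 60 second watchdog margin. */
int main(void)
{
	unsigned long twd_timer_rate = 400000000UL;  /* assumed TWD clock, Hz */
	unsigned long mpcore_margin = 60;            /* assumed margin, seconds */

	/* Prescaler is hard-wired to 256, as in mpcore_wdt_keepalive(). */
	unsigned long count = (twd_timer_rate / 256) * mpcore_margin;

	printf("TWD_WDOG_LOAD reload value: %lu\n", count);
	return 0;
}
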
diff --git a/target/linux/generic/patches-3.3/993-mpcore_wdt_fix_wdioc_setoptions_handling.patch b/target/linux/generic/patches-3.3/993-mpcore_wdt_fix_wdioc_setoptions_handling.patch
new file mode 100644
index 0000000..fa261ce
--- /dev/null
+++ b/target/linux/generic/patches-3.3/993-mpcore_wdt_fix_wdioc_setoptions_handling.patch
@@ -0,0 +1,29 @@
+According to include/linux/watchdog.h, WDIOC_SETOPTIONS is
+classified as a 'read from device' ioctl call:
+ #define WDIOC_SETOPTIONS _IOR(WATCHDOG_IOCTL_BASE, 4, int)
+
+However, the 'mpcore_wdt' driver performs 'copy_from_user' only if
+_IOC_WRITE is set, so the local variable 'uarg', which is used in
+WDIOC_SETOPTIONS handling, remains uninitialized.
+
+The proper way to fix this is to bind WDIOC_SETOPTIONS to _IOW,
+but this would break compatibility.
+So add an additional condition for performing 'copy_from_user'.
+
+Signed-off-by: Vitaly Kuzmichev <vkuzmichev@mvista.com>
+---
+ drivers/watchdog/mpcore_wdt.c | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+--- a/drivers/watchdog/mpcore_wdt.c
++++ b/drivers/watchdog/mpcore_wdt.c
+@@ -233,7 +233,8 @@ static long mpcore_wdt_ioctl(struct file
+ if (_IOC_DIR(cmd) && _IOC_SIZE(cmd) > sizeof(uarg))
+ return -ENOTTY;
+
+- if (_IOC_DIR(cmd) & _IOC_WRITE) {
++ if ((_IOC_DIR(cmd) & _IOC_WRITE)
++ || cmd == WDIOC_SETOPTIONS) {
+ ret = copy_from_user(&uarg, (void __user *)arg, _IOC_SIZE(cmd));
+ if (ret)
+ return -EFAULT;
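For context, WDIOC_SETOPTIONS is normally called from user space with a pointer to an int of option flags, which is why the driver must copy the argument in even though the ioctl number is encoded with _IOR. The sketch below shows such a call; the device path and the choice of flag are illustrative assumptions, and error handling is kept minimal.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	/* Assumed device node; the actual path depends on the system. */
	int fd = open("/dev/watchdog", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The caller hands the kernel a pointer to an int of WDIOS_* flags,
	 * so the driver needs copy_from_user() despite the _IOR encoding. */
	int options = WDIOS_DISABLECARD;
	if (ioctl(fd, WDIOC_SETOPTIONS, &options) < 0)
		perror("WDIOC_SETOPTIONS");

	close(fd);
	return 0;
}
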
diff --git a/target/linux/generic/patches-3.3/994-mpcore_wdt_fix_timer_mode_setup.patch b/target/linux/generic/patches-3.3/994-mpcore_wdt_fix_timer_mode_setup.patch
new file mode 100644
index 0000000..0090923
--- /dev/null
+++ b/target/linux/generic/patches-3.3/994-mpcore_wdt_fix_timer_mode_setup.patch
@@ -0,0 +1,57 @@
+Allow the watchdog to set its interrupt as pending when it is configured
+for timer mode (in other words, allow it to emit an interrupt).
+Also add macros for all Watchdog Control Register flags.
+
+Signed-off-by: Vitaly Kuzmichev <vkuzmichev@mvista.com>
+---
+ arch/arm/include/asm/smp_twd.h | 6 ++++++
+ drivers/watchdog/mpcore_wdt.c | 15 +++++++++++----
+ 2 files changed, 17 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/include/asm/smp_twd.h
++++ b/arch/arm/include/asm/smp_twd.h
+@@ -18,6 +18,12 @@
+ #define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
+ #define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
+
++#define TWD_WDOG_CONTROL_ENABLE (1 << 0)
++#define TWD_WDOG_CONTROL_PERIODIC (1 << 1)
++#define TWD_WDOG_CONTROL_IT_ENABLE (1 << 2)
++#define TWD_WDOG_CONTROL_TIMER_MODE (0 << 3)
++#define TWD_WDOG_CONTROL_WATCHDOG_MODE (1 << 3)
++
+ struct clock_event_device;
+
+ extern void __iomem *twd_base;
+--- a/drivers/watchdog/mpcore_wdt.c
++++ b/drivers/watchdog/mpcore_wdt.c
+@@ -118,18 +118,25 @@ static void mpcore_wdt_stop(struct mpcor
+
+ static void mpcore_wdt_start(struct mpcore_wdt *wdt)
+ {
++ u32 mode;
++
+ dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n");
+
+ /* This loads the count register but does NOT start the count yet */
+ mpcore_wdt_keepalive(wdt);
+
++ /* Setup watchdog - prescale=256, enable=1 */
++ mode = (255 << 8) | TWD_WDOG_CONTROL_ENABLE;
++
+ if (mpcore_noboot) {
+- /* Enable watchdog - prescale=256, watchdog mode=0, enable=1 */
+- writel(0x0000FF01, wdt->base + TWD_WDOG_CONTROL);
++ /* timer mode, send interrupt */
++ mode |= TWD_WDOG_CONTROL_TIMER_MODE |
++ TWD_WDOG_CONTROL_IT_ENABLE;
+ } else {
+- /* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */
+- writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL);
++ /* watchdog mode */
++ mode |= TWD_WDOG_CONTROL_WATCHDOG_MODE;
+ }
++ writel(mode, wdt->base + TWD_WDOG_CONTROL);
+ }
+
+ static int mpcore_wdt_set_heartbeat(int t)