Checking in Jackson 0.9.1 baseline.

diff --git a/src/java/org/codehaus/jackson/JsonFactory.java b/src/java/org/codehaus/jackson/JsonFactory.java
new file mode 100644
index 0000000..c756f71
--- /dev/null
+++ b/src/java/org/codehaus/jackson/JsonFactory.java
@@ -0,0 +1,165 @@
+package org.codehaus.jackson;
+
+import java.io.*;
+import java.lang.ref.SoftReference;
+import java.net.URL;
+
+import org.codehaus.jackson.io.*;
+import org.codehaus.jackson.impl.ReaderBasedParser;
+import org.codehaus.jackson.impl.WriterBasedGenerator;
+import org.codehaus.jackson.util.BufferRecycler;
+import org.codehaus.jackson.util.SymbolTable;
+
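+/**
+ * Factory class used for constructing {@link JsonParser} and
+ * {@link JsonGenerator} instances.
+ *<p>
+ * A minimal usage sketch (the file name is illustrative only):
+ *<pre>
+ *   JsonFactory factory = new JsonFactory();
+ *   JsonParser jp = factory.createJsonParser(new File("data.json"));
+ *   while (jp.nextToken() != null) {
+ *       // process the current token here
+ *   }
+ *   jp.close();
+ *</pre>
+ */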
+public final class JsonFactory
+{
+    /**
+     * Legal JSON content always uses a Unicode encoding from a small
+     * set of choices, so we can simply enumerate all legal types here.
+     */
+    public enum Encoding {
+        UTF8("UTF-8"),
+        UTF16_BE("UTF-16BE"),
+        UTF16_LE("UTF-16LE"),
+        UTF32_BE("UTF-32BE"),
+        UTF32_LE("UTF-32LE");
+
+        final String mJavaName;
+
+        Encoding(String javaName) { mJavaName = javaName; }
+
+        public String getJavaName() { return mJavaName; }
+    }
+
+    /**
+     * This <code>ThreadLocal</code> contains a {@link SoftReference}
+     * to a {@link BufferRecycler} used to provide low-cost
+     * buffer recycling between reader and writer instances.
+     */
+    final static ThreadLocal<SoftReference<BufferRecycler>> mRecyclerRef = new ThreadLocal<SoftReference<BufferRecycler>>();
+
+    /**
+     * Each factory comes equipped with a shared root symbol table.
+     * It should not be linked back to the original blueprint, to
+     * prevent contents from leaking between factories.
+     */
+    private SymbolTable mCurrSymbolTable = SymbolTable.createRoot();
+
+    public JsonFactory() { }
+
+    /*
+    //////////////////////////////////////////////////////
+    // Reader factories
+    //////////////////////////////////////////////////////
+     */
+
+    public JsonParser createJsonParser(File f)
+        throws IOException, JsonParseException
+    {
+        IOContext ctxt = createContext(f);
+        Reader r = ByteSourceBootstrapper.bootstrap(ctxt, new FileInputStream(f));
+        return new ReaderBasedParser(ctxt, r, mCurrSymbolTable.makeChild());
+    }
+
+    public JsonParser createJsonParser(URL url)
+        throws IOException, JsonParseException
+    {
+        InputStream in = optimizedStreamFromURL(url);
+        IOContext ctxt = createContext(url);
+        Reader r = ByteSourceBootstrapper.bootstrap(ctxt, in);
+        return new ReaderBasedParser(ctxt, r, mCurrSymbolTable.makeChild());
+    }
+
+    /**
+     * Note: no encoding argument is taken, since it can always be
+     * auto-detected as suggested by the JSON RFC.
+     */
+    public JsonParser createJsonParser(InputStream in)
+        throws IOException, JsonParseException
+    {
+        IOContext ctxt = createContext(in);
+        Reader r = ByteSourceBootstrapper.bootstrap(ctxt, in);
+        return new ReaderBasedParser(ctxt, r, mCurrSymbolTable.makeChild());
+    }
+
+    public JsonParser createJsonParser(Reader r)
+        throws IOException, JsonParseException
+    {
+        IOContext ctxt = createContext(r);
+        return new ReaderBasedParser(ctxt, r, mCurrSymbolTable.makeChild());
+    }
+
+    /*
+    //////////////////////////////////////////////////////
+    // Generator factories
+    //////////////////////////////////////////////////////
+     */
+
+    public JsonGenerator createJsonGenerator(OutputStream out, Encoding enc)
+        throws IOException
+    {
+        IOContext ctxt = createContext(out);
+        ctxt.setEncoding(enc.getJavaName());
+        if (enc == Encoding.UTF8) { // We have optimized writer for UTF-8
+            return new WriterBasedGenerator(ctxt, new UTF8Writer(ctxt, out));
+        }
+        return new WriterBasedGenerator(ctxt, new OutputStreamWriter(out, enc.getJavaName()));
+    }
+
+    public JsonGenerator createJsonGenerator(Writer out)
+        throws IOException
+    {
+        IOContext ctxt = createContext(out);
+        return new WriterBasedGenerator(ctxt, out);
+    }
+
+    public JsonGenerator createJsonGenerator(File f, Encoding enc)
+        throws IOException
+    {
+        return createJsonGenerator(new FileOutputStream(f), enc);
+    }
+
+    /*
+    ///////////////////////////////////////////////////////////
+    // Internal methods
+    ///////////////////////////////////////////////////////////
+     */
+
+    protected IOContext createContext(Object srcRef)
+    {
+        return new IOContext(getBufferRecycler(), srcRef);
+    }
+
+    protected BufferRecycler getBufferRecycler()
+    {
+        SoftReference<BufferRecycler> ref = mRecyclerRef.get();
+        BufferRecycler br = (ref == null) ? null : ref.get();
+
+        if (br == null) {
+            br = new BufferRecycler();
+            if (ref == null) {
+                mRecyclerRef.set(new SoftReference<BufferRecycler>(br));
+            }
+        }
+        return br;
+    }
+
+    public static InputStream optimizedStreamFromURL(URL url)
+        throws IOException
+    {
+        if ("file".equals(url.getProtocol())) {
+            /* Can not construct a FileInputStream directly if the path
+             * refers to a network drive on Windows; checking the host
+             * handles that case. The check might not be needed on all
+             * platforms (NFS?), but should not matter much: the performance
+             * penalty of extra wrapping mostly matters for local files.
+             */
+            if (url.getHost() == null) {
+                return new FileInputStream(url.getPath());
+            }
+        }
+        return url.openStream();
+    }
+}
diff --git a/src/java/org/codehaus/jackson/JsonGenerationException.java b/src/java/org/codehaus/jackson/JsonGenerationException.java
new file mode 100644
index 0000000..d3f46c6
--- /dev/null
+++ b/src/java/org/codehaus/jackson/JsonGenerationException.java
@@ -0,0 +1,22 @@
+package org.codehaus.jackson;
+
+/**
+ * Class for exceptions raised during JSON writing, such as trying to
+ * output content in the wrong context (a non-matching end-array or
+ * end-object marker, for example).
+ */
+public class JsonGenerationException
+    extends JsonProcessingException
+{
+    final static long serialVersionUID = 123; // Stupid eclipse...
+
+    public JsonGenerationException(Throwable rootCause)
+    {
+        super(rootCause);
+    }
+
+    public JsonGenerationException(String msg)
+    {
+        super(msg, (JsonLocation)null);
+    }
+}
diff --git a/src/java/org/codehaus/jackson/JsonGenerator.java b/src/java/org/codehaus/jackson/JsonGenerator.java
new file mode 100644
index 0000000..fab2186
--- /dev/null
+++ b/src/java/org/codehaus/jackson/JsonGenerator.java
@@ -0,0 +1,138 @@
+package org.codehaus.jackson;
+
+import java.io.*;
+import java.math.BigDecimal;
+
+/**
+ * This base class defines the API for outputting JSON content.
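+ *<p>
+ * A minimal usage sketch (<code>someWriter</code> stands for any
+ * {@link java.io.Writer} supplied by the caller):
+ *<pre>
+ *   JsonGenerator jg = new JsonFactory().createJsonGenerator(someWriter);
+ *   jg.writeStartObject();
+ *   jg.writeFieldName("message");
+ *   jg.writeString("Hello, World!");
+ *   jg.writeEndObject();
+ *   jg.close();
+ *</pre>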
+ */
+public abstract class JsonGenerator
+{
+    /**
+     * Object that handles pretty-printing (usually additional
+     * white space to make results more human-readable) during
+     * output. If null, no pretty-printing is done.
+     */
+    protected PrettyPrinter mPrettyPrinter;
+
+    protected JsonGenerator() { }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Configuring generator
+    ////////////////////////////////////////////////////
+      */
+
+    public final void setPrettyPrinter(PrettyPrinter pp) {
+        mPrettyPrinter = pp;
+    }
+
+    /**
+     * Convenience method for enabling pretty-printing using
+     * the default pretty printer
+     * ({@link org.codehaus.jackson.impl.DefaultPrettyPrinter}).
+     */
+    public abstract void useDefaultPrettyPrinter();
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, write methods, structural
+    ////////////////////////////////////////////////////
+     */
+
+    public abstract void writeStartArray()
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeEndArray()
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeStartObject()
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeEndObject()
+        throws IOException, JsonGenerationException;
+
+    protected abstract void doWriteEndObject()
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeFieldName(String name)
+        throws IOException, JsonGenerationException;
+
+    protected abstract void doWriteFieldName(String name, boolean commaBefore)
+        throws IOException, JsonGenerationException;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, write methods, textual/binary
+    ////////////////////////////////////////////////////
+     */
+
+    public abstract void writeString(String text)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeString(char[] text, int offset, int len)
+        throws IOException, JsonGenerationException;
+
+    /**
+     * Fallback method which can be used to make the generator copy
+     * input text verbatim, with no modifications.
+     */
+    public abstract void writeRaw(String text)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeRaw(String text, int offset, int len)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeRaw(char[] text, int offset, int len)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeRaw(char c)
+        throws IOException, JsonGenerationException;
+
+    /**
+     * Method that will output the given chunk of binary data as a
+     * base64-encoded, complete String value (surrounded by double quotes).
+     */
+    public abstract void writeBinary(byte[] data, int offset, int len)
+        throws IOException, JsonGenerationException;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, write methods, primitive
+    ////////////////////////////////////////////////////
+     */
+
+    public abstract void writeNumber(int i)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeNumber(long l)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeNumber(double d)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeNumber(float f)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeNumber(BigDecimal dec)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeBoolean(boolean state)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeNull()
+        throws IOException, JsonGenerationException;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, buffer handling
+    ////////////////////////////////////////////////////
+     */
+
+    public abstract void flush()
+        throws IOException;
+
+    public abstract void close()
+        throws IOException;
+}
diff --git a/src/java/org/codehaus/jackson/JsonLocation.java b/src/java/org/codehaus/jackson/JsonLocation.java
new file mode 100644
index 0000000..974d436
--- /dev/null
+++ b/src/java/org/codehaus/jackson/JsonLocation.java
@@ -0,0 +1,55 @@
+package org.codehaus.jackson;
+
+public class JsonLocation
+{
+    final long mTotalChars;
+
+    final int mLineNr;
+    final int mColumnNr;
+
+    /**
+     * Displayable description for input source: file path, url
+     */
+    final Object mSourceRef;
+
+    public JsonLocation(Object srcRef, long totalChars, int lineNr, int colNr)
+    {
+        mTotalChars = totalChars;
+        mLineNr = lineNr;
+        mColumnNr = colNr;
+        mSourceRef = srcRef;
+    }
+
+    public Object getSourceRef() { return mSourceRef; }
+    public int getLineNr() { return mLineNr; }
+    public int getColumnNr() { return mColumnNr; }
+
+    public long getCharOffset() { return mTotalChars; }
+
+    public long getByteOffset()
+    {
+        /* Unfortunately, none of legal encodings are straight single-byte
+         * encodings. Could determine offset for UTF-16/UTF-32, but the
+         * most important one is UTF-8... so for now, let's just not
+         * report anything.
+         */
+        return -1;
+    }
+
+    public String toString()
+    {
+        StringBuilder sb = new StringBuilder(80);
+        sb.append("[Source: ");
+        if (mSourceRef == null) {
+            sb.append("UNKNOWN");
+        } else {
+            sb.append(mSourceRef.toString());
+        }
+        sb.append("; line: ");
+        sb.append(mLineNr);
+        sb.append(", column: ");
+        sb.append(mColumnNr);
+        sb.append(']');
+        return sb.toString();
+    }
+}
diff --git a/src/java/org/codehaus/jackson/JsonParseException.java b/src/java/org/codehaus/jackson/JsonParseException.java
new file mode 100644
index 0000000..1fdecf3
--- /dev/null
+++ b/src/java/org/codehaus/jackson/JsonParseException.java
@@ -0,0 +1,21 @@
+package org.codehaus.jackson;
+
+/**
+ * Class for parsing exceptions, to indicate non-well-formed document
+ * content
+ */
+public class JsonParseException
+    extends JsonProcessingException
+{
+    final static long serialVersionUID = 123; // Stupid eclipse...
+
+    public JsonParseException(String msg, JsonLocation loc)
+    {
+        super(msg, loc);
+    }
+
+    public JsonParseException(String msg, JsonLocation loc, Throwable root)
+    {
+        super(msg, loc, root);
+    }
+}
diff --git a/src/java/org/codehaus/jackson/JsonParser.java b/src/java/org/codehaus/jackson/JsonParser.java
new file mode 100644
index 0000000..a2a09ec
--- /dev/null
+++ b/src/java/org/codehaus/jackson/JsonParser.java
@@ -0,0 +1,158 @@
+package org.codehaus.jackson;
+
+import java.io.*;
+import java.math.BigDecimal;
+
+/**
+ * This is the public API implemented by concrete JSON parser instances.
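+ *<p>
+ * A typical read loop, sketched against the methods declared below
+ * (<code>jp</code> is a parser obtained from a {@link JsonFactory}):
+ *<pre>
+ *   JsonToken t;
+ *   while ((t = jp.nextToken()) != null) {
+ *       if (t == JsonToken.FIELD_NAME) {
+ *           String name = jp.getCurrentName();
+ *       } else if (t.isNumeric()) {
+ *           Number value = jp.getNumberValue();
+ *       }
+ *   }
+ *</pre>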
+ *
+ * @author Tatu Saloranta
+ */
+public abstract class JsonParser
+{
+    /**
+     * Enumeration of possible "native" (optimal) types that can be
+     * used for numbers.
+     */
+    public enum NumberType {
+        INT, LONG, BIG_INTEGER, FLOAT, DOUBLE, BIG_DECIMAL
+    };
+
+    protected JsonParser() { }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, traversal
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * @return Next token from the stream, if any found, or null
+     *   to indicate end-of-input
+     */
+    public abstract JsonToken nextToken()
+        throws IOException, JsonParseException;
+
+    /**
+     * @return Type of the token this parser currently points to,
+     *   if any: null both before any tokens have been read, and
+     *   after end-of-input has been encountered.
+     */
+    public abstract JsonToken getCurrentToken();
+
+    public abstract boolean hasCurrentToken();
+
+    /**
+     * Method that can be called to get the name associated with
+     * the current event. Will return null for all token types
+     * except for {@link JsonToken#FIELD_NAME}.
+     */
+    public abstract String getCurrentName()
+        throws IOException, JsonParseException;
+
+    public abstract void close()
+        throws IOException;
+
+    /**
+     * Method that can be used to access the parsing context the reader
+     * is in. There are 3 different types: root, array and object contexts,
+     * with slightly different available information. Contexts are
+     * hierarchically nested, and can be used, for example, to figure
+     * out the part of the input document that corresponds to a specific
+     * array or object (for highlighting purposes, or error reporting).
+     * Contexts can also be used for simple xpath-like matching of
+     * input, if so desired.
+     */
+    public abstract JsonReadContext getParsingContext();
+
+    /**
+     * Method that returns the <b>starting</b> location of the current
+     * token; that is, position of the first character from input
+     * that starts the current token.
+     */
+    public abstract JsonLocation getTokenLocation();
+
+    /**
+     * Method that returns location of the last processed character;
+     * usually for error reporting purposes.
+     */
+    public abstract JsonLocation getCurrentLocation();
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, access to token information, text
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method for accessing textual representation of the current event;
+     * if no current event (before first call to {@link #nextToken}, or
+     * after encountering end-of-input), returns null.
+     * Method can be called for any event.
+     */
+    public abstract String getText()
+        throws IOException, JsonParseException;
+
+    public abstract char[] getTextCharacters()
+        throws IOException, JsonParseException;
+
+    public abstract int getTextLength()
+        throws IOException, JsonParseException;
+
+    public abstract int getTextOffset()
+        throws IOException, JsonParseException;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, access to token information, numeric
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Generic number value accessor method that will work for
+     * all kinds of numeric values. It will return the optimal
+     * (simplest/smallest possible) wrapper object that can
+     * express the numeric value just parsed.
+     */
+    public abstract Number getNumberValue()
+        throws IOException, JsonParseException;
+
+    /**
+     * If the current event is of type
+     * {@link JsonToken#VALUE_NUMBER_INT} or
+     * {@link JsonToken#VALUE_NUMBER_FLOAT}, returns
+     * one of the {@link NumberType} constants; otherwise returns null.
+     */
+    public abstract NumberType getNumberType()
+        throws IOException, JsonParseException;
+
+    /**
+     * Numeric accessor that can be called when the current
+     * token is of type {@link JsonToken#VALUE_NUMBER_INT} and
+     * it can be expressed as a Java int primitive type.
+     *<p>
+     * Note: if the token is an integer, but its value falls
+     * outside the range of Java int, a {@link JsonParseException}
+     * will be thrown to indicate numeric overflow/underflow.
+     */
+    public abstract int getIntValue()
+        throws IOException, JsonParseException;
+
+    /**
+     * Numeric accessor that can be called when the current
+     * token is of type {@link JsonToken#VALUE_NUMBER_INT} and
+     * it can be expressed as a Java long primitive type.
+     *<p>
+     * Note: if the token is an integer, but its value falls
+     * outside the range of Java long, a {@link JsonParseException}
+     * will be thrown to indicate numeric overflow/underflow.
+     */
+    public abstract long getLongValue()
+        throws IOException, JsonParseException;
+
+    public abstract double getDoubleValue()
+        throws IOException, JsonParseException;
+
+    public abstract BigDecimal getDecimalValue()
+        throws IOException, JsonParseException;
+}
diff --git a/src/java/org/codehaus/jackson/JsonProcessingException.java b/src/java/org/codehaus/jackson/JsonProcessingException.java
new file mode 100644
index 0000000..29cedca
--- /dev/null
+++ b/src/java/org/codehaus/jackson/JsonProcessingException.java
@@ -0,0 +1,66 @@
+package org.codehaus.jackson;
+
+/**
+ * Intermediate base class for all problems encountered when processing
+ * JSON input or output that are not pure I/O exceptions.
+ */
+public class JsonProcessingException
+    extends java.io.IOException
+{
+    final static long serialVersionUID = 123; // Stupid eclipse...
+
+    protected JsonLocation mLocation;
+
+    protected JsonProcessingException(String msg, JsonLocation loc, Throwable rootCause)
+    {
+        super(msg);
+        if (rootCause != null) {
+            initCause(rootCause);
+        }
+        mLocation = loc;
+    }
+
+    protected JsonProcessingException(String msg, JsonLocation loc)
+    {
+        this(msg, loc, null);
+    }
+
+    protected JsonProcessingException(String msg, Throwable rootCause)
+    {
+        this(msg, null, rootCause);
+    }
+
+    protected JsonProcessingException(Throwable rootCause)
+    {
+        this(null, null, rootCause);
+    }
+
+    public JsonLocation getLocation()
+    {
+        return mLocation;
+    }
+
+    /**
+     * Overridden so that we can add location information to the message.
+     */
+    @Override
+    public String getMessage()
+    {
+        String msg = super.getMessage();
+        JsonLocation loc = getLocation();
+        if (loc != null) {
+            StringBuilder sb = new StringBuilder();
+            sb.append(msg);
+            sb.append('\n');
+            sb.append(" at ");
+            sb.append(loc.toString());
+            return sb.toString();
+        }
+        return msg;
+    }
+
+    @Override
+    public String toString() {
+        return getClass().getName()+": "+getMessage();
+    }
+}
diff --git a/src/java/org/codehaus/jackson/JsonReadContext.java b/src/java/org/codehaus/jackson/JsonReadContext.java
new file mode 100644
index 0000000..573daed
--- /dev/null
+++ b/src/java/org/codehaus/jackson/JsonReadContext.java
@@ -0,0 +1,333 @@
+package org.codehaus.jackson;
+
+import org.codehaus.jackson.impl.JsonParserBase;
+import org.codehaus.jackson.util.CharTypes;
+
+/**
+ * Context object used to keep track of the relative logical position
+ * of the current event when parsing.
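+ *<p>
+ * Instances also produce a simple path-like description of the current
+ * location via {@link #toString}: "/" for the root context, the index of
+ * the current entry in brackets for an Array context, and the current
+ * field name (quoted, in curly braces) for an Object context.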
+ */
+public abstract class JsonReadContext
+{
+    protected final static int INT_COLON = ':';
+    protected final static int INT_COMMA = ',';
+
+    /**
+     * Type of the context
+     */
+    public enum Type {
+        ROOT, OBJECT, ARRAY;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Return codes for methods that verify which separator
+    // is used for which kind of scope.
+    // The reason for using ints over enums is that enum handling
+    // appears slower than int handling in switch statements.
+    ////////////////////////////////////////////////////
+     */
+
+    public final static int HANDLED_EXPECT_NAME = 0;
+    public final static int HANDLED_EXPECT_VALUE = 1;
+    public final static int MISSING_COMMA = 2;
+    public final static int MISSING_COLON = 3;
+    public final static int NOT_EXP_SEPARATOR_NEED_VALUE = 4;
+    public final static int NOT_EXP_SEPARATOR_NEED_NAME = 5;
+
+    protected final JsonReadContext mParent;
+
+    /**
+     * Index of the currently processed entry. Starts with -1 to signal
+     * that no entries have been started, and gets advanced each
+     * time a new entry is started, either by encountering an expected
+     * separator, or with new values if no separators are expected
+     * (the case for root context).
+     */
+    protected int mIndex;
+
+    // // // Location information (minus source reference)
+
+    long mTotalChars;
+
+    int mLineNr;
+    int mColumnNr;
+
+    /*
+    //////////////////////////////////////////////////
+    // Simple instance reuse slots; speed up things
+    // a bit (10-15%) for docs with lots of small
+    // arrays/objects
+    //////////////////////////////////////////////////
+     */
+
+    JsonReadContext mChildArray = null;
+
+    JsonReadContext mChildObject = null;
+
+    /*
+    //////////////////////////////////////////////////
+    // Life-cycle
+    //////////////////////////////////////////////////
+     */
+
+    public JsonReadContext(JsonReadContext parent, JsonParserBase ir)
+    {
+        mParent = parent;
+        mIndex = -1;
+        mTotalChars = ir.getTokenCharacterOffset();
+        mLineNr = ir.getTokenLineNr();
+        mColumnNr = ir.getTokenColumnNr();
+    }
+
+    private final void resetLocation(JsonParserBase ir)
+    {
+        mIndex = -1;
+        mTotalChars = ir.getTokenCharacterOffset();
+        mLineNr = ir.getTokenLineNr();
+        mColumnNr = ir.getTokenColumnNr();
+    }
+
+    // // // Factory methods
+
+    public static JsonReadContext createRootContext(JsonParserBase ir)
+    {
+        return new RootRContext(ir);
+    }
+
+    public final JsonReadContext createChildArrayContext(JsonParserBase ir)
+    {
+        JsonReadContext ctxt = mChildArray;
+        if (ctxt == null) {
+            mChildArray = ctxt = new ArrayRContext(this, ir);
+        } else {
+            ctxt.resetLocation(ir);
+        }
+        return ctxt;
+    }
+
+    public final JsonReadContext createChildObjectContext(JsonParserBase ir)
+    {
+        JsonReadContext ctxt = mChildObject;
+        if (ctxt == null) {
+            mChildObject = ctxt = new ObjectRContext(this, ir);
+        } else {
+            ctxt.resetLocation(ir);
+        }
+        return ctxt;
+    }
+
+    // // // Shared API
+
+    public final JsonReadContext getParent() { return mParent; }
+
+    public final boolean isRoot() { return mParent == null; }
+
+    /**
+     * @return Number of entries that have been started (including the current one, if any)
+     */
+    public final int getEntryCount()
+    {
+        return mIndex+1;
+    }
+
+    /**
+     * @return Index of the currently processed entry, if any
+     */
+    public final int getCurrentIndex()
+    {
+        return (mIndex < 0) ? 0 : mIndex;
+    }
+
+    /**
+     * @return Location pointing to the point where the context
+     *   start marker was found
+     */
+    public final JsonLocation getStartLocation(Object srcRef)
+    {
+        return new JsonLocation(srcRef, mTotalChars, mLineNr, mColumnNr);
+    }
+
+    // // // API sub-classes are to implement
+
+    public abstract Type getType();
+    public abstract boolean isArray();
+    public abstract boolean isObject();
+
+    public final String getTypeDesc() { return getType().toString(); }
+
+    public abstract int handleSeparator(int ch);
+
+    public abstract String getCurrentName();
+
+    // // // Internally used abstract methods
+
+    protected abstract void appendDesc(StringBuilder sb);
+
+    /**
+     * Method only to be called in the object context
+     */
+    public void setCurrentName(String name) {
+        throw new IllegalStateException("Can not call setCurrentName() for "+getTypeDesc());
+    }
+
+    // // // Overridden standard methods
+
+    /**
+     * Overridden to provide developer readable "JsonPath" representation
+     * of the context.
+     */
+    public final String toString()
+    {
+        StringBuilder sb = new StringBuilder(64);
+        appendDesc(sb);
+        return sb.toString();
+    }
+}
+
+/**
+ * Root context is simple, as the only state it keeps is the index of
+ * the currently active entry.
+ */
+final class RootRContext
+    extends JsonReadContext
+{
+    public RootRContext(JsonParserBase ir)
+    {
+        super(null, ir);
+    }
+
+    public Type getType() { return Type.ROOT; }
+    public boolean isArray() { return false; }
+    public boolean isObject() { return false; }
+
+    public String getCurrentName() { return null; }
+
+    public int handleSeparator(int ch)
+    {
+        // Starting of a new entry is implied
+        ++mIndex;
+        return NOT_EXP_SEPARATOR_NEED_VALUE;
+    }
+
+    protected void appendDesc(StringBuilder sb)
+    {
+        sb.append("/");
+    }
+}
+
+final class ArrayRContext
+    extends JsonReadContext
+{
+    public ArrayRContext(JsonReadContext parent, JsonParserBase ir)
+    {
+        super(parent, ir);
+    }
+
+    public Type getType() { return Type.ARRAY; }
+    public boolean isArray() { return true; }
+    public boolean isObject() { return false; }
+
+    public String getCurrentName() { return null; }
+
+    /**
+     * State handling for arrays is simple, the only consideration is
+     * for the first entry, which does not take leading comma.
+     */
+    public int handleSeparator(int ch)
+    {
+        // New entry, first or not?
+        int ix = mIndex;
+        ++mIndex;
+        if (ix < 0) {
+            return NOT_EXP_SEPARATOR_NEED_VALUE;
+        }
+        // Other than first, must get comma first
+        if (ch == INT_COMMA) {
+            return HANDLED_EXPECT_VALUE;
+        }
+        return MISSING_COMMA;
+    }
+
+    protected void appendDesc(StringBuilder sb)
+    {
+        sb.append('[');
+        sb.append(getCurrentIndex());
+        sb.append(']');
+    }
+}
+
+final class ObjectRContext
+    extends JsonReadContext
+{
+    /**
+     * Name of the field whose value is to be parsed.
+     */
+    protected String mCurrentName;
+
+    /**
+     * Flag to indicate that the context just received the
+     * field name, and is to get a value next
+     */
+    protected boolean mExpectValue;
+
+    public ObjectRContext(JsonReadContext parent, JsonParserBase ir)
+    {
+        super(parent, ir);
+        mCurrentName = null;
+        mExpectValue = false;
+    }
+
+    public Type getType() { return Type.OBJECT; }
+    public boolean isArray() { return false; }
+    public boolean isObject() { return true; }
+
+    public String getCurrentName() { return mCurrentName; }
+
+    /**
+     * Objects ("maps") have the most complicated state handling, so
+     * we may return any of the constants, depending on exactly
+     * where we are.
+     */
+    public int handleSeparator(int ch)
+    {
+        if (mExpectValue) { // have name, expecting ':' followed by value
+            if (ch == INT_COLON) {
+                mExpectValue = false;
+                return HANDLED_EXPECT_VALUE;
+            }
+            return MISSING_COLON;
+        }
+        // New entry, entries start with name. But is it the first or not?
+        if (mIndex < 0) { // First; no separator expected
+            mExpectValue = true;
+            return NOT_EXP_SEPARATOR_NEED_NAME;
+        }
+        // Other than first, must get comma first
+        if (ch == INT_COMMA) {
+            mExpectValue = true;
+            return HANDLED_EXPECT_NAME;
+        }
+        return MISSING_COMMA;
+    }
+
+    @Override
+    public void setCurrentName(String name)
+    {
+        mCurrentName = name;
+        ++mIndex; // so that we can deal with comma
+    }
+
+    protected void appendDesc(StringBuilder sb)
+    {
+        sb.append('{');
+        if (mCurrentName != null) {
+            sb.append('"');
+            CharTypes.appendQuoted(sb, mCurrentName);
+            sb.append('"');
+        } else {
+            sb.append('?');
+        }
+        sb.append('}');
+    }
+}
diff --git a/src/java/org/codehaus/jackson/JsonToken.java b/src/java/org/codehaus/jackson/JsonToken.java
new file mode 100644
index 0000000..651ee48
--- /dev/null
+++ b/src/java/org/codehaus/jackson/JsonToken.java
@@ -0,0 +1,105 @@
+package org.codehaus.jackson;
+
+/**
+ * This enumeration defines the basic token types that result from parsing
+ * JSON content.
+ */
+public enum JsonToken
+{
+    /**
+     * START_OBJECT is returned when encountering '{'
+     * which signals starting of an Object value
+     */
+    START_OBJECT("{"),
+        
+    /**
+     * END_OBJECT is returned when encountering '}'
+     * which signals ending of an Object value
+     */
+        END_OBJECT("}"),
+        
+    /**
+     * START_ARRAY is returned when encountering '['
+     * which signals starting of an Array value
+     */
+        START_ARRAY("["),
+
+    /**
+     * END_ARRAY is returned when encountering ']'
+     * which signals ending of an Array value
+     */
+        END_ARRAY("]"),
+        
+     /**
+      * FIELD_NAME is returned when a String token is encountered
+      * as a field name (same lexical value, different function)
+      */
+        FIELD_NAME(null),
+        
+     /**
+      * VALUE_STRING is returned when a String token is encountered
+      * in value context (array element, field value, or root-level
+      * stand-alone value)
+      */
+        VALUE_STRING(null),
+
+     /**
+      * VALUE_NUMBER_INT is returned when an integer numeric token is
+      * encountered in value context: that is, a number that does
+      * not have floating point or exponent marker in it (consists
+      * only of an optional sign, followed by one or more digits)
+      */
+        VALUE_NUMBER_INT(null),
+
+    /**
+     * VALUE_NUMBER_FLOAT is returned when a numeric token
+     * that is not an integer is encountered: that is, a number that
+     * has a floating point or exponent marker in it, in addition
+     * to one or more digits.
+     */
+        VALUE_NUMBER_FLOAT(null),
+
+    /**
+     * VALUE_TRUE is returned when encountering literal "true" in
+     * value context
+     */
+        VALUE_TRUE("true"),
+
+    /**
+     * VALUE_FALSE is returned when encountering literal "false" in
+     * value context
+     */
+        VALUE_FALSE("false"),
+
+    /**
+     * VALUE_NULL is returned when encountering literal "null" in
+     * value context
+     */
+        VALUE_NULL("null");
+
+    final String mSerialized;
+
+    final char[] mSerializedChars;
+
+    /**
+     * @param token Textual representation for this token, if there is a
+     *   single static representation; null otherwise
+     */
+    JsonToken(String token)
+    {
+        if (token == null) {
+            mSerialized = null;
+            mSerializedChars = null;
+        } else {
+            mSerialized = token;
+            mSerializedChars = token.toCharArray();
+        }
+    }
+
+    public String asString() { return mSerialized; }
+    public char[] asCharArray() { return mSerializedChars; }
+
+    public boolean isNumeric() {
+        return (this == VALUE_NUMBER_INT) || (this == VALUE_NUMBER_FLOAT);
+    }
+}
diff --git a/src/java/org/codehaus/jackson/JsonWriteContext.java b/src/java/org/codehaus/jackson/JsonWriteContext.java
new file mode 100644
index 0000000..df13580
--- /dev/null
+++ b/src/java/org/codehaus/jackson/JsonWriteContext.java
@@ -0,0 +1,269 @@
+package org.codehaus.jackson;
+
+/**
+ * Context object used to keep track of the relative logical position
+ * of the current event when generating JSON content.
+ */
+public abstract class JsonWriteContext
+{
+    /**
+     * Type of the context
+     */
+    public enum Type {
+        ROOT, OBJECT, ARRAY;
+    }
+
+    // // // Return values for writeValue()
+
+    public final static int STATUS_OK_AS_IS = 0;
+    public final static int STATUS_OK_AFTER_COMMA = 1;
+    public final static int STATUS_OK_AFTER_COLON = 2;
+    public final static int STATUS_OK_AFTER_SPACE = 3; // in root context
+    public final static int STATUS_EXPECT_VALUE = 4;
+    public final static int STATUS_EXPECT_NAME = 5;
+
+    protected final JsonWriteContext mParent;
+
+    /**
+     * Index of the currently processed entry. Starts with -1 to signal
+     * that no entries have been started, and gets advanced each
+     * time a new entry is started.
+     */
+    protected int mIndex;
+
+    /*
+    //////////////////////////////////////////////////
+    // Simple instance reuse slots; speed up things
+    // a bit (10-15%) for docs with lots of small
+    // arrays/objects
+    //////////////////////////////////////////////////
+     */
+
+    JsonWriteContext mChildArray = null;
+
+    JsonWriteContext mChildObject = null;
+
+    /*
+    //////////////////////////////////////////////////
+    // Life-cycle
+    //////////////////////////////////////////////////
+     */
+
+    public JsonWriteContext(JsonWriteContext parent)
+    {
+        mParent = parent;
+        mIndex = -1;
+    }
+
+    // // // Factory methods
+
+    public static JsonWriteContext createRootContext()
+    {
+        return new RootWContext();
+    }
+
+    public final JsonWriteContext createChildArrayContext()
+    {
+        JsonWriteContext ctxt = mChildArray;
+        if (ctxt == null) {
+            mChildArray = ctxt = new ArrayWContext(this);
+        } else { // need to reset settings; parent is already ok
+            ctxt.mIndex = -1;
+        }
+        return ctxt;
+    }
+
+    public final JsonWriteContext createChildObjectContext()
+    {
+        JsonWriteContext ctxt = mChildObject;
+        if (ctxt == null) {
+            mChildObject = ctxt = new ObjectWContext(this);
+        } else { // need to reset settings; parent is already ok
+            ctxt.mIndex = -1;
+        }
+        return ctxt;
+    }
+
+    // // // Shared API
+
+    public final JsonWriteContext getParent() { return mParent; }
+
+    public final boolean isRoot() { return mParent == null; }
+
+    public final int getEntryCount()
+    {
+        return mIndex+1;
+    }
+
+    /**
+     * @return Index of the currently processed entry, if any
+     */
+    public final int getCurrentIndex()
+    {
+        return (mIndex < 0) ? 0 : mIndex;
+    }
+
+    // // // API sub-classes are to implement
+
+    public abstract Type getType();
+
+    /**
+     * Method that the writer is to call before it writes a field name.
+     *
+     * @return One of the <code>STATUS_</code> constants, indicating whether
+     *   a separator is needed before the name, or that a field name is not
+     *   expected in this context (<code>STATUS_EXPECT_VALUE</code>)
+     */
+    public abstract int writeFieldName(String name);
+
+    public abstract int writeValue();
+
+    public boolean inArray() { return false; }
+
+    public boolean inObject() { return false; }
+
+    // // // Internally used abstract methods
+
+    protected abstract void appendDesc(StringBuilder sb);
+
+    // // // Overridden standard methods
+
+    /**
+     * Overridden to provide developer readable "JsonPath" representation
+     * of the context.
+     */
+    public final String toString()
+    {
+        StringBuilder sb = new StringBuilder(64);
+        appendDesc(sb);
+        return sb.toString();
+    }
+}
+
+/**
+ * Root context is simple, as the only state it keeps is the index of
+ * the currently active entry.
+ */
+final class RootWContext
+    extends JsonWriteContext
+{
+    public RootWContext()
+    {
+        super(null);
+    }
+
+    public Type getType() { return Type.ROOT; }
+
+    public String getCurrentName() { return null; }
+
+    public int writeFieldName(String name)
+    {
+        return STATUS_EXPECT_VALUE;
+    }
+
+    public int writeValue()
+    {
+        // No commas within root context, but need space
+        ++mIndex;
+        return (mIndex == 0) ? STATUS_OK_AS_IS : STATUS_OK_AFTER_SPACE;
+    }
+
+    protected void appendDesc(StringBuilder sb)
+    {
+        sb.append("/");
+    }
+}
+
+final class ArrayWContext
+    extends JsonWriteContext
+{
+    public ArrayWContext(JsonWriteContext parent)
+    {
+        super(parent);
+    }
+
+    public Type getType() { return Type.ARRAY; }
+
+    public String getCurrentName() { return null; }
+
+    public int writeFieldName(String name)
+    {
+        return STATUS_EXPECT_VALUE;
+    }
+
+    public int writeValue()
+    {
+        int ix = mIndex;
+        ++mIndex;
+        return (ix < 0) ? STATUS_OK_AS_IS : STATUS_OK_AFTER_COMMA;
+    }
+
+    public boolean inArray() { return true; }
+
+    protected void appendDesc(StringBuilder sb)
+    {
+        sb.append('[');
+        sb.append(getCurrentIndex());
+        sb.append(']');
+    }
+}
+
+final class ObjectWContext
+    extends JsonWriteContext
+{
+    /**
+     * Name of the field whose value is to be written.
+     */
+    protected String mCurrentName;
+
+    /**
+     * Flag to indicate that the context just received the
+     * field name, and is to get a value next
+     */
+    protected boolean mExpectValue;
+
+    public ObjectWContext(JsonWriteContext parent)
+    {
+        super(parent);
+        mCurrentName = null;
+        mExpectValue = false;
+    }
+
+    public Type getType() { return Type.OBJECT; }
+
+    public String getCurrentName() { return mCurrentName; }
+
+    public int writeFieldName(String name)
+    {
+        if (mCurrentName != null) { // just wrote a name...
+            return STATUS_EXPECT_VALUE;
+        }
+        mCurrentName = name;
+        return (mIndex < 0) ? STATUS_OK_AS_IS : STATUS_OK_AFTER_COMMA;
+    }
+
+    public int writeValue()
+    {
+        if (mCurrentName == null) {
+            return STATUS_EXPECT_NAME;
+        }
+        mCurrentName = null;
+        ++mIndex;
+        return STATUS_OK_AFTER_COLON;
+    }
+
+    public boolean inObject() { return true; }
+
+    protected void appendDesc(StringBuilder sb)
+    {
+        sb.append('{');
+        if (mCurrentName != null) {
+            sb.append('"');
+            // !!! TODO: Name chars should be escaped?
+            sb.append(mCurrentName);
+            sb.append('"');
+        } else {
+            sb.append('?');
+        }
+        sb.append('}');
+    }
+}
+
diff --git a/src/java/org/codehaus/jackson/PrettyPrinter.java b/src/java/org/codehaus/jackson/PrettyPrinter.java
new file mode 100644
index 0000000..5b1109c
--- /dev/null
+++ b/src/java/org/codehaus/jackson/PrettyPrinter.java
@@ -0,0 +1,165 @@
+package org.codehaus.jackson;
+
+import java.io.IOException;
+
+/**
+ * Interface for pretty printer instances. Pretty printers are used
+ * to add white space in output JSON content, to make results
+ * more human readable. Usually this means things like adding
+ * linefeeds and indentation.
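+ *<p>
+ * A pretty printer is attached to a generator with
+ * {@link JsonGenerator#setPrettyPrinter}, or via
+ * {@link JsonGenerator#useDefaultPrettyPrinter} for the default
+ * implementation. A short sketch (<code>someWriter</code> stands for
+ * any Writer supplied by the caller):
+ *<pre>
+ *   JsonGenerator jg = new JsonFactory().createJsonGenerator(someWriter);
+ *   jg.useDefaultPrettyPrinter();
+ *   // subsequent writes are indented by the default pretty printer
+ *</pre>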
+ */
+public interface PrettyPrinter
+{
+    /*
+    //////////////////////////////////////////////////////
+    // First methods that act both as events, and expect
+    // output for correct functioning (i.e. something gets
+    // output even when not pretty-printing)
+    //////////////////////////////////////////////////////
+     */
+
+    // // // Root-level handling:
+
+    /**
+     * Method called after a root-level value has been completely
+     * output, and before another value is to be output.
+     *<p>
+     * Default
+     * handling (without pretty-printing) will output a space, to
+     * allow values to be parsed correctly. Pretty-printer is
+     * to output some other suitable and nice-looking separator
+     * (tab(s), space(s), linefeed(s) or any combination thereof).
+     */
+    public void writeRootValueSeparator(JsonGenerator jg)
+        throws IOException, JsonGenerationException;
+
+    // // Object handling
+
+    /**
+     * Method called when an Object value is to be output, before
+     * any fields are output.
+     *<p>
+     * Default handling (without pretty-printing) will output
+     * the opening curly bracket.
+     * Pretty-printer is
+     * to output a curly bracket as well, but can surround that
+     * with other (white-space) decoration.
+     */
+    public void writeStartObject(JsonGenerator jg)
+        throws IOException, JsonGenerationException;
+
+    /**
+     * Method called after an Object value has been completely output
+     * (minus closing curly bracket).
+     *<p>
+     * Default handling (without pretty-printing) will output
+     * the closing curly bracket.
+     * Pretty-printer is
+     * to output a curly bracket as well, but can surround that
+     * with other (white-space) decoration.
+     *
+     * @param nrOfEntries Number of direct members of the Object that
+     *   have been output
+     */
+    public void writeEndObject(JsonGenerator jg, int nrOfEntries)
+        throws IOException, JsonGenerationException;
+
+    /**
+     * Method called after an object entry (field:value) has been completely
+     * output, and before another value is to be output.
+     *<p>
+     * Default handling (without pretty-printing) will output a single
+     * comma to separate the two. Pretty-printer is
+     * to output a comma as well, but can surround that with other
+     * (white-space) decoration.
+     */
+    public void writeObjectEntrySeparator(JsonGenerator jg)
+        throws IOException, JsonGenerationException;
+
+    /**
+     * Method called after an object field has been output, but
+     * before the value is output.
+     *<p>
+     * Default handling (without pretty-printing) will output a single
+     * colon to separate the two. Pretty-printer is
+     * to output a colon as well, but can surround that with other
+     * (white-space) decoration.
+     */
+    public void writeObjectFieldValueSeparator(JsonGenerator jg)
+        throws IOException, JsonGenerationException;
+
+    // // // Array handling
+
+    /**
+     * Method called when an Array value is to be output, before
+     * any member/child values are output.
+     *<p>
+     * Default handling (without pretty-printing) will output
+     * the opening bracket.
+     * Pretty-printer is
+     * to output a bracket as well, but can surround that
+     * with other (white-space) decoration.
+     */
+    public void writeStartArray(JsonGenerator jg)
+        throws IOException, JsonGenerationException;
+
+    /**
+     * Method called after an Array value has been completely output
+     * (minus closing bracket).
+     *<p>
+     * Default handling (without pretty-printing) will output
+     * the closing bracket.
+     * Pretty-printer is
+     * to output a bracket as well, but can surround that
+     * with other (white-space) decoration.
+     *
+     * @param nrOfValues Number of direct members of the array that
+     *   have been output
+     */
+    public void writeEndArray(JsonGenerator jg, int nrOfValues)
+        throws IOException, JsonGenerationException;
+
+    /**
+     * Method called after an array value has been completely
+     * output, and before another value is to be output.
+     *<p>
+     * Default handling (without pretty-printing) will output a single
+     * comma to separate the two. Pretty-printer is
+     * to output a comma as well, but can surround that with other
+     * (white-space) decoration.
+     */
+    public void writeArrayValueSeparator(JsonGenerator jg)
+        throws IOException, JsonGenerationException;
+
+    /*
+    //////////////////////////////////////////////////////
+    // Then events that by default do not produce any output
+    // but that are often overridden to add white space
+    // in pretty-printing mode
+    //////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method called after array start marker has been output,
+     * and right before the first value is to be output.
+     * It is <b>not</b> called for arrays with no values.
+     *<p>
+     * Default handling does not output anything, but pretty-printer
+     * is free to add any white space decoration.
+     */
+    public void beforeArrayValues(JsonGenerator jg)
+        throws IOException, JsonGenerationException;
+
+    /**
+     * Method called after object start marker has been output,
+     * and right before the field name of the first entry is
+     * to be output.
+     * It is <b>not</b> called for objects without entries.
+     *<p>
+     * Default handling does not output anything, but pretty-printer
+     * is free to add any white space decoration.
+     */
+    public void beforeObjectEntries(JsonGenerator jg)
+        throws IOException, JsonGenerationException;
+}
+
diff --git a/src/java/org/codehaus/jackson/impl/DefaultPrettyPrinter.java b/src/java/org/codehaus/jackson/impl/DefaultPrettyPrinter.java
new file mode 100644
index 0000000..245a956
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/DefaultPrettyPrinter.java
@@ -0,0 +1,257 @@
+package org.codehaus.jackson.impl;
+
+import java.io.*;
+import java.util.Arrays;
+
+import org.codehaus.jackson.*;
+
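+/**
+ * Default {@link PrettyPrinter} implementation: object entries are
+ * separated using platform default linefeeds and 2-space indentation
+ * per nesting level, while array values are separated by single spaces.
+ */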
+public class DefaultPrettyPrinter
+    implements PrettyPrinter
+{
+    // // // Config, indentation
+
+    /**
+     * By default, let's use only spaces to separate array values.
+     */
+    protected Indenter mArrayIndenter = new FixedSpaceIndenter();
+
+    /**
+     * By default, let's use a linefeed-adding indenter to separate
+     * object entries. We'll further configure indenter to use
+     * system-specific linefeeds, and 2 spaces per level (as opposed to,
+     * say, single tabs)
+     */
+    protected Indenter mObjectIndenter = new Lf2SpacesIndenter();
+
+    // // // Config, other white space configuration
+
+    /**
+     * By default we will add spaces around colons used to
+     * separate object fields and values.
+     * If disabled, no spaces are used around the colon.
+     */
+    protected boolean mSpacesInObjectEntries = true;
+
+    // // // State:
+
+    /**
+     * Number of open levels of nesting. Used to determine amount of
+     * indentation to use.
+     */
+    protected int mNesting = 0;
+
+    /*
+    ////////////////////////////////////////////////////////////
+    // Life-cycle (construct, configure)
+    ////////////////////////////////////////////////////////////
+    */
+
+    public DefaultPrettyPrinter() { }
+
+    public void indentArraysWith(Indenter i)
+    {
+        mArrayIndenter = (i == null) ? new NopIndenter() : i;
+    }
+
+    public void indentObjectsWith(Indenter i)
+    {
+        mObjectIndenter = (i == null) ? new NopIndenter() : i;
+    }
+
+    public void spacesInObjectEntries(boolean b) { mSpacesInObjectEntries = b; }
+
+    /*
+    ////////////////////////////////////////////////////////////
+    // PrettyPrinter impl
+    ////////////////////////////////////////////////////////////
+     */
+
+    public void writeRootValueSeparator(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeRaw(' ');
+    }
+
+    public void writeStartObject(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeRaw('{');
+        if (!mObjectIndenter.isInline()) {
+            ++mNesting;
+        }
+    }
+
+    public void beforeObjectEntries(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        mObjectIndenter.writeIndentation(jg, mNesting);
+    }
+
+    /**
+     * Method called after an object field has been output, but
+     * before the value is output.
+     *<p>
+     * Default handling (without pretty-printing) will output a single
+     * colon to separate the two. Pretty-printer is
+     * to output a colon as well, but can surround that with other
+     * (white-space) decoration.
+     */
+    public void writeObjectFieldValueSeparator(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        if (mSpacesInObjectEntries) {
+            jg.writeRaw(" : ");
+        } else {
+            jg.writeRaw(':');
+        }
+    }
+
+    /**
+     * Method called after an object entry (field:value) has been completely
+     * output, and before another value is to be output.
+     *<p>
+     * Default handling (without pretty-printing) will output a single
+     * comma to separate the two. Pretty-printer is
+     * to output a comma as well, but can surround that with other
+     * (white-space) decoration.
+     */
+    public void writeObjectEntrySeparator(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeRaw(',');
+        mObjectIndenter.writeIndentation(jg, mNesting);
+    }
+
+    public void writeEndObject(JsonGenerator jg, int nrOfEntries)
+        throws IOException, JsonGenerationException
+    {
+        if (!mObjectIndenter.isInline()) {
+            --mNesting;
+        }
+        if (nrOfEntries > 0) {
+            mObjectIndenter.writeIndentation(jg, mNesting);
+        } else {
+            jg.writeRaw(' ');
+        }
+        jg.writeRaw('}');
+    }
+
+    public void writeStartArray(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        if (!mArrayIndenter.isInline()) {
+            ++mNesting;
+        }
+        jg.writeRaw('[');
+    }
+
+    public void beforeArrayValues(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        mArrayIndenter.writeIndentation(jg, mNesting);
+    }
+
+    /**
+     * Method called after an array value has been completely
+     * output, and before another value is to be output.
+     *<p>
+     * Default handling (without pretty-printing) will output a single
+     * comma to separate the two. Pretty-printer is
+     * to output a comma as well, but can surround that with other
+     * (white-space) decoration.
+     */
+    public void writeArrayValueSeparator(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeRaw(',');
+        mArrayIndenter.writeIndentation(jg, mNesting);
+    }
+
+    public void writeEndArray(JsonGenerator jg, int nrOfValues)
+        throws IOException, JsonGenerationException
+    {
+        if (!mArrayIndenter.isInline()) {
+            --mNesting;
+        }
+        if (nrOfValues > 0) {
+            mArrayIndenter.writeIndentation(jg, mNesting);
+        } else {
+            jg.writeRaw(' ');
+        }
+        jg.writeRaw(']');
+    }
+
+    /*
+    ////////////////////////////////////////////////////////////
+    // Helper classes
+    ////////////////////////////////////////////////////////////
+     */
+
+    /**
+     * Dummy implementation that adds no indentation whatsoever
+     */
+    public static class NopIndenter
+        implements Indenter
+    {
+        public NopIndenter() { }
+        public void writeIndentation(JsonGenerator jg, int level) { }
+        public boolean isInline() { return true; }
+    }
+
+    /**
+     * This is a very simple indenter that only ever adds a
+     * single space for indentation. It is used as the default
+     * indenter for array values.
+     */
+    public static class FixedSpaceIndenter
+        implements Indenter
+    {
+        public FixedSpaceIndenter() { }
+
+        public void writeIndentation(JsonGenerator jg, int level)
+            throws IOException, JsonGenerationException
+        {
+            jg.writeRaw(' ');
+        }
+
+        public boolean isInline() { return true; }
+    }
+
+    /**
+     * Default linefeed-based indenter uses system-specific linefeeds and
+     * 2 spaces for indentation per level.
+     */
+    public static class Lf2SpacesIndenter
+        implements Indenter
+    {
+        final static String SYSTEM_LINE_SEPARATOR;
+        static {
+            String lf = null;
+            try {
+                lf = System.getProperty("line.separator");
+            } catch (Throwable t) { } // access exception?
+            SYSTEM_LINE_SEPARATOR = (lf == null) ? "\n" : lf;
+        }
+
+        final static int SPACE_COUNT = 64;
+        final static char[] SPACES = new char[SPACE_COUNT];
+        static {
+            Arrays.fill(SPACES, ' ');
+        }
+
+        public Lf2SpacesIndenter() { }
+
+        public boolean isInline() { return false; }
+
+        public void writeIndentation(JsonGenerator jg, int level)
+            throws IOException, JsonGenerationException
+        {
+            jg.writeRaw(SYSTEM_LINE_SEPARATOR);
+            level += level; // 2 spaces per level
+            while (level > SPACE_COUNT) { // should never happen but...
+                jg.writeRaw(SPACES, 0, SPACE_COUNT); 
+                level -= SPACES.length;
+            }
+            jg.writeRaw(SPACES, 0, level);
+        }
+    }
+}
diff --git a/src/java/org/codehaus/jackson/impl/Indenter.java b/src/java/org/codehaus/jackson/impl/Indenter.java
new file mode 100644
index 0000000..6249f55
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/Indenter.java
@@ -0,0 +1,23 @@
+package org.codehaus.jackson.impl;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.*;
+
+/**
+ * Interface that defines objects that can produce indentation used
+ * to separate object entries and array values. Indentation in this
+ * context just means insertion of white space, independent of whether
+ * linefeeds are output.
+ */
+public interface Indenter
+{
+    public void writeIndentation(JsonGenerator jg, int level)
+        throws IOException, JsonGenerationException;
+
+    /**
+     * @return True if indenter is considered inline (does not add linefeeds),
+     *   false otherwise
+     */
+    public boolean isInline();
+}
diff --git a/src/java/org/codehaus/jackson/impl/JsonGeneratorBase.java b/src/java/org/codehaus/jackson/impl/JsonGeneratorBase.java
new file mode 100644
index 0000000..42cc157
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/JsonGeneratorBase.java
@@ -0,0 +1,203 @@
+package org.codehaus.jackson.impl;
+
+import java.io.*;
+import java.math.BigDecimal;
+
+import org.codehaus.jackson.*;
+
+/**
+ * This base class defines API that a JSON generator exposes
+ * to applications, as well as internal API that sub-classes
+ * have to implement.
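+ *<p>
+ * The public <code>writeXxx</code> methods handle write-context
+ * bookkeeping and delegation to the configured {@link PrettyPrinter}
+ * (if any); sub-classes produce the actual raw output by implementing
+ * the corresponding <code>doWriteXxx</code> methods.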
+ */
+public abstract class JsonGeneratorBase
+    extends JsonGenerator
+{
+    /*
+    ////////////////////////////////////////////////////
+    // State
+    ////////////////////////////////////////////////////
+     */
+
+    protected JsonWriteContext mWriteContext;
+
+
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////////////////
+     */
+
+    protected JsonGeneratorBase()
+    {
+        super();
+        mWriteContext = JsonWriteContext.createRootContext();
+    }
+
+    public final void useDefaultPrettyPrinter()
+    {
+        setPrettyPrinter(new DefaultPrettyPrinter());
+    }
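+
+    /* A hedged usage sketch for pretty printing (assumes a concrete
+     * JsonGenerator has been obtained, e.g. from a JsonFactory):
+     *
+     *   JsonGenerator jg = ...;
+     *   jg.useDefaultPrettyPrinter();
+     *   jg.writeStartObject();
+     *   jg.writeFieldName("id");
+     *   jg.writeNumber(13);
+     *   jg.writeEndObject();
+     *   jg.close();
+     *
+     * With the default pretty printer the object is written across
+     * multiple lines, roughly as:
+     *
+     *   {
+     *     "id" : 13
+     *   }
+     */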
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, write methods, structural
+    ////////////////////////////////////////////////////
+     */
+
+    public void writeStartArray()
+        throws IOException, JsonGenerationException
+    {
+        // Array is a value, need to verify it's allowed
+        verifyValueWrite("start an array");
+        mWriteContext = mWriteContext.createChildArrayContext();
+        if (mPrettyPrinter != null) {
+            mPrettyPrinter.writeStartArray(this);
+        } else {
+            doWriteStartArray();
+        }
+    }
+
+    protected abstract void doWriteStartArray()
+        throws IOException, JsonGenerationException;
+
+    public void writeEndArray()
+        throws IOException, JsonGenerationException
+    {
+        if (!mWriteContext.inArray()) {
+            reportError("Current context not an array but "+mWriteContext.getType());
+        }
+        if (mPrettyPrinter != null) {
+            mPrettyPrinter.writeEndArray(this, mWriteContext.getEntryCount());
+        } else {
+            doWriteEndArray();
+        }
+        mWriteContext = mWriteContext.getParent();
+    }
+
+    protected abstract void doWriteEndArray()
+        throws IOException, JsonGenerationException;
+
+    public void writeStartObject()
+        throws IOException, JsonGenerationException
+    {
+        verifyValueWrite("start an object");
+        mWriteContext = mWriteContext.createChildObjectContext();
+        if (mPrettyPrinter != null) {
+            mPrettyPrinter.writeStartObject(this);
+        } else {
+            doWriteStartObject();
+        }
+    }
+
+    protected abstract void doWriteStartObject()
+        throws IOException, JsonGenerationException;
+
+    public void writeEndObject()
+        throws IOException, JsonGenerationException
+    {
+        if (!mWriteContext.inObject()) {
+            reportError("Current context not an object but "+mWriteContext.getType());
+        }
+        mWriteContext = mWriteContext.getParent();
+        if (mPrettyPrinter != null) {
+            mPrettyPrinter.writeEndObject(this, mWriteContext.getEntryCount());
+        } else {
+            doWriteEndObject();
+        }
+    }
+
+    protected abstract void doWriteEndObject()
+        throws IOException, JsonGenerationException;
+
+    public void writeFieldName(String name)
+        throws IOException, JsonGenerationException
+    {
+        // Object is a value, need to verify it's allowed
+        int status = mWriteContext.writeFieldName(name);
+        if (status == JsonWriteContext.STATUS_EXPECT_VALUE) {
+            reportError("Can not write a field name, expecting a value");
+        }
+        doWriteFieldName(name, (status == JsonWriteContext.STATUS_OK_AFTER_COMMA));
+    }
+
+    public abstract void doWriteFieldName(String name, boolean commaBefore)
+        throws IOException, JsonGenerationException;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, write methods, textual
+    ////////////////////////////////////////////////////
+     */
+
+    public abstract void writeString(String text)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeString(char[] text, int offset, int len)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeRaw(String text)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeRaw(char[] text, int offset, int len)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeBinary(byte[] data, int offset, int len)
+        throws IOException, JsonGenerationException;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, write methods, primitive
+    ////////////////////////////////////////////////////
+     */
+
+    public abstract void writeNumber(int i)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeNumber(long l)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeNumber(double d)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeNumber(float f)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeNumber(BigDecimal dec)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeBoolean(boolean state)
+        throws IOException, JsonGenerationException;
+
+    public abstract void writeNull()
+        throws IOException, JsonGenerationException;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, buffer handling
+    ////////////////////////////////////////////////////
+     */
+
+    protected abstract void releaseBuffers();
+
+    public abstract void flush()
+        throws IOException;
+
+    public abstract void close()
+        throws IOException;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Package methods for this, sub-classes
+    ////////////////////////////////////////////////////
+     */
+
+    protected abstract void verifyValueWrite(String typeMsg)
+        throws IOException, JsonGenerationException;
+
+    protected void reportError(String msg)
+        throws JsonGenerationException
+    {
+        throw new JsonGenerationException(msg);
+    }
+}
diff --git a/src/java/org/codehaus/jackson/impl/JsonNumericParserBase.java b/src/java/org/codehaus/jackson/impl/JsonNumericParserBase.java
new file mode 100644
index 0000000..5a82169
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/JsonNumericParserBase.java
@@ -0,0 +1,475 @@
+package org.codehaus.jackson.impl;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.io.IOContext;
+import org.codehaus.jackson.io.NumberInput;
+
+/**
+ * Another intermediate base class used by all Jackson {@link JsonParser}
+ * implementations. Contains shared functionality for dealing with
+ * number parsing aspects, independent of input source decoding.
+ *
+ * @author Tatu Saloranta
+ */
+public abstract class JsonNumericParserBase
+    extends JsonParserBase
+{
+    /* Additionally we need to be able to distinguish between
+     * various numeric representations, since we try to use
+     * the fastest one that works for a given textual representation.
+     */
+
+    // First, integer types
+
+    final protected static int NR_INT = 0x0001;
+    final protected static int NR_LONG = 0x0002;
+    final protected static int NR_BIGINT = 0x0004;
+
+    // And then floating point types
+
+    final protected static int NR_DOUBLE = 0x008;
+    final protected static int NR_BIGDECIMAL = 0x0010;
+
+    // Also, we need some numeric constants
+
+    final static BigDecimal BD_MIN_LONG = new BigDecimal(Long.MIN_VALUE);
+    final static BigDecimal BD_MAX_LONG = new BigDecimal(Long.MAX_VALUE);
+
+    final static BigDecimal BD_MIN_INT = new BigDecimal(Integer.MIN_VALUE);
+    final static BigDecimal BD_MAX_INT = new BigDecimal(Integer.MAX_VALUE);
+
+    // These are not very accurate, but have to do...
+    // (note: non-final to prevent inlining)
+
+    static double MIN_LONG_D = (double) Long.MIN_VALUE;
+    static double MAX_LONG_D = (double) Long.MAX_VALUE;
+
+    static double MIN_INT_D = (double) Integer.MIN_VALUE;
+    static double MAX_INT_D = (double) Integer.MAX_VALUE;
+
+    // Digits, numeric
+    final protected static int INT_0 = '0';
+    final protected static int INT_1 = '1';
+    final protected static int INT_2 = '2';
+    final protected static int INT_3 = '3';
+    final protected static int INT_4 = '4';
+    final protected static int INT_5 = '5';
+    final protected static int INT_6 = '6';
+    final protected static int INT_7 = '7';
+    final protected static int INT_8 = '8';
+    final protected static int INT_9 = '9';
+
+    final protected static int INT_MINUS = '-';
+    final protected static int INT_PLUS = '+';
+    final protected static int INT_DECIMAL_POINT = '.';
+
+    final protected static int INT_e = 'e';
+    final protected static int INT_E = 'E';
+
+    final protected static char CHAR_NULL = '\0';
+
+    /*
+    ////////////////////////////////////////////////////
+    // Numeric value holders: multiple fields used for
+    // efficiency
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Bitfield that indicates which numeric representations
+     * have been calculated for the current type
+     */
+    protected int mNumTypesValid = 0;
+
+    // First primitives
+
+    protected int mNumberInt;
+
+    protected long mNumberLong;
+
+    protected double mNumberDouble;
+
+    // And then object types
+
+    protected BigInteger mNumberBigInt;
+
+    protected BigDecimal mNumberBigDecimal;
+
+    // And then other information about value itself
+
+    /**
+     * Flag that indicates whether numeric value has a negative
+     * value. That is, whether its textual representation starts
+     * with minus character.
+     */
+    protected boolean mNumberNegative;
+
+    /**
+     * Length of integer part of the number, in characters
+     */
+    protected int mIntLength;
+
+    /**
+     * Length of the fractional part (not including decimal
+     * point or exponent), in characters.
+     * Not used for pure integer values.
+     */
+    protected int mFractLength;
+
+    /**
+     * Length of the exponent part of the number, if any, not
+     * including 'e' marker or sign, just digits. 
+     * Not used for pure integer values.
+     */
+    protected int mExpLength;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////////////////
+     */
+
+    protected JsonNumericParserBase(IOContext ctxt)
+    {
+        super(ctxt);
+    }
+
+    protected final JsonToken reset(boolean negative, int intLen, int fractLen, int expLen)
+    {
+        mNumberNegative = negative;
+        mIntLength = intLen;
+        mFractLength = fractLen;
+        mExpLength = expLen;
+        mNumTypesValid = 0; // to force parsing
+        if (fractLen < 1 && expLen < 1) { // integer
+            return (mCurrToken = JsonToken.VALUE_NUMBER_INT);
+        }
+        // Nope, floating point
+        return (mCurrToken = JsonToken.VALUE_NUMBER_FLOAT);
+    }
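+
+    /* Worked example of the classification above: for the textual value
+     * "-1.25e3" the tokenizer ends up calling reset(true, 1, 2, 1)
+     * (one integer digit, two fraction digits, one exponent digit),
+     * which yields VALUE_NUMBER_FLOAT; for "1234" it calls
+     * reset(false, 4, 0, 0), which yields VALUE_NUMBER_INT.
+     */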
+
+    /*
+    ////////////////////////////////////////////////////
+    // Additional methods for sub-classes to implement
+    ////////////////////////////////////////////////////
+     */
+
+    protected abstract JsonToken parseNumberText(int ch)
+        throws IOException, JsonParseException;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Numeric accessors of public API
+    ////////////////////////////////////////////////////
+     */
+
+    public Number getNumberValue()
+        throws IOException, JsonParseException
+    {
+        if (mNumTypesValid == 0) {
+            parseNumericValue(); // will also check event type
+        }
+        // Separate types for int types
+        if (mCurrToken == JsonToken.VALUE_NUMBER_INT) {
+            if ((mNumTypesValid & NR_INT) != 0) {
+                return Integer.valueOf(mNumberInt);
+            }
+            if ((mNumTypesValid & NR_LONG) != 0) {
+                return Long.valueOf(mNumberLong);
+            }
+            if ((mNumTypesValid & NR_BIGINT) != 0) {
+                return mNumberBigInt;
+            }
+            // Shouldn't get this far but if we do
+            return mNumberBigDecimal;
+        }
+
+        /* And then floating point types. But here optimal type
+         * needs to be big decimal, to avoid losing any data?
+         */
+        if ((mNumTypesValid & NR_BIGDECIMAL) != 0) {
+            return mNumberBigDecimal;
+        }
+        if ((mNumTypesValid & NR_DOUBLE) == 0) { // sanity check
+            throwInternal();
+        }
+        return Double.valueOf(mNumberDouble);
+    }
+
+    public NumberType getNumberType()
+        throws IOException, JsonParseException
+    {
+        if (mNumTypesValid == 0) {
+            parseNumericValue(); // will also check event type
+        }
+        if (mCurrToken == JsonToken.VALUE_NUMBER_INT) {
+            if ((mNumTypesValid & NR_INT) != 0) {
+                return NumberType.INT;
+            }
+            if ((mNumTypesValid & NR_LONG) != 0) {
+                return NumberType.LONG;
+            }
+            return NumberType.BIG_INTEGER;
+        }
+
+        /* And then floating point types. Here optimal type
+         * needs to be big decimal, to avoid losing any data?
+         * However...
+         */
+        if ((mNumTypesValid & NR_BIGDECIMAL) != 0) {
+            return NumberType.BIG_DECIMAL;
+        }
+        return NumberType.DOUBLE;
+    }
+
+    public int getIntValue()
+        throws IOException, JsonParseException
+    {
+        if ((mNumTypesValid & NR_INT) == 0) {
+            if (mNumTypesValid == 0) { // not parsed at all
+                parseNumericValue(); // will also check event type
+            }
+            if ((mNumTypesValid & NR_INT) == 0) { // wasn't an int natively?
+                convertNumberToInt(); // let's make it so, if possible
+            }
+        }
+        return mNumberInt;
+    }
+
+    public long getLongValue()
+        throws IOException, JsonParseException
+    {
+        if ((mNumTypesValid & NR_LONG) == 0) {
+            if (mNumTypesValid == 0) {
+                parseNumericValue();
+            }
+            if ((mNumTypesValid & NR_LONG) == 0) {
+                convertNumberToLong();
+            }
+        }
+        return mNumberLong;
+    }
+
+    public double getDoubleValue()
+        throws IOException, JsonParseException
+    {
+        if ((mNumTypesValid & NR_DOUBLE) == 0) {
+            if (mNumTypesValid == 0) {
+                parseNumericValue();
+            }
+            if ((mNumTypesValid & NR_DOUBLE) == 0) {
+                convertNumberToDouble();
+            }
+        }
+        return mNumberDouble;
+    }
+
+    public BigDecimal getDecimalValue()
+        throws IOException, JsonParseException
+    {
+        if ((mNumTypesValid & NR_BIGDECIMAL) == 0) {
+            if (mNumTypesValid == 0) {
+                parseNumericValue();
+            }
+            if ((mNumTypesValid & NR_BIGDECIMAL) == 0) {
+                convertNumberToBigDecimal();
+            }
+        }
+        return mNumberBigDecimal;
+    }
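+
+    /* Usage sketch for the lazy accessors above (assumes "jp" is a
+     * parser positioned on a numeric token):
+     *
+     *   if (jp.getCurrentToken() == JsonToken.VALUE_NUMBER_INT) {
+     *       long l = jp.getLongValue();     // parses/converts on demand
+     *   } else {
+     *       double d = jp.getDoubleValue();
+     *   }
+     *
+     * The first accessor call triggers parseNumericValue(); subsequent
+     * calls for other types reuse the parsed value via the convert*
+     * methods instead of re-parsing the text.
+     */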
+
+
+    /*
+    ////////////////////////////////////////////////////
+    // Conversion from textual to numeric representation
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method that will parse the actual numeric value out of a
+     * syntactically valid number. The type it parses into depends on
+     * whether the value is a floating point number, as well as on its
+     * magnitude: the smallest applicable type is used for efficiency.
+     */
+    protected final void parseNumericValue()
+        throws JsonParseException
+    {
+        // First things first: must be a numeric event
+        if (mCurrToken == null || !mCurrToken.isNumeric()) {
+            reportError("Current token ("+mCurrToken+") not numeric, can not use numeric value accessors");
+        }
+        try {
+            // Int or float?
+            if (mCurrToken == JsonToken.VALUE_NUMBER_INT) {
+                char[] buf = mTextBuffer.getTextBuffer();
+                int offset = mTextBuffer.getTextOffset();
+                if (mNumberNegative) {
+                    ++offset;
+                }
+                if (mIntLength <= 9) { // definitely fits in int
+                    int i = NumberInput.parseInt(buf, offset, mIntLength);
+                    mNumberInt = mNumberNegative ? -i : i;
+                    mNumTypesValid = NR_INT;
+                    return;
+                }
+                if (mIntLength <= 18) { // definitely fits AND is easy to parse using 2 int parse calls
+                    long l = NumberInput.parseLong(buf, offset, mIntLength);
+                    mNumberLong = mNumberNegative ? -l : l;
+                    mNumTypesValid = NR_LONG;
+                    return;
+                }
+                // nope, need the heavy guns...
+                BigInteger bi = new BigInteger(mTextBuffer.contentsAsString());
+                mNumberBigDecimal = new BigDecimal(bi);
+                mNumTypesValid = NR_BIGDECIMAL;
+                return;
+            }
+
+            // Nope: floating point
+
+            /* !!! TBI: Use BigDecimal if need be? And/or optimize with
+             *   faster parsing
+             */
+            String value = mTextBuffer.contentsAsString();
+            mNumberDouble = Double.parseDouble(value);
+            mNumTypesValid = NR_DOUBLE;
+        } catch (NumberFormatException nex) {
+            // Can this ever occur? Due to overflow, maybe?
+            wrapError("Malformed numeric value '"+mTextBuffer.contentsAsString()+"'", nex);
+        }
+    }
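+
+    /* Examples of the magnitude-based type selection above:
+     *
+     *   "1234567"              (7 digits)  -> int (NR_INT)
+     *   "123456789012345"      (15 digits) -> long (NR_LONG)
+     *   "12345678901234567890" (20 digits) -> BigInteger wrapped in a
+     *                                         BigDecimal (NR_BIGDECIMAL)
+     *   "1.25e3"               (not an int) -> double (NR_DOUBLE)
+     */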
+
+    /*
+    ////////////////////////////////////////////////////
+    // Conversions
+    ////////////////////////////////////////////////////
+     */    
+
+    protected void convertNumberToInt()
+        throws IOException, JsonParseException
+    {
+        // First, converting from long ought to be easy
+        if ((mNumTypesValid & NR_LONG) != 0) {
+            // Let's verify it's lossless conversion by simple roundtrip
+            int result = (int) mNumberLong;
+            if (((long) result) != mNumberLong) {
+                reportError("Numeric value ("+getText()+") out of range of int");
+            }
+            mNumberInt = result;
+        } else if ((mNumTypesValid & NR_DOUBLE) != 0) {
+            // Need to check boundaries
+            if (mNumberDouble < MIN_INT_D || mNumberDouble > MAX_INT_D) {
+                reportOverflowInt();
+            }
+            mNumberInt = (int) mNumberDouble;
+        } else if ((mNumTypesValid & NR_BIGDECIMAL) != 0) {
+            if (BD_MIN_INT.compareTo(mNumberBigDecimal) > 0 
+                || BD_MAX_INT.compareTo(mNumberBigDecimal) < 0) {
+                reportOverflowInt();
+            }
+            mNumberInt = mNumberBigDecimal.intValue();
+        } else {
+            throwInternal(); // should never get here
+        }
+
+        mNumTypesValid |= NR_INT;
+    }
+
+    protected void convertNumberToLong()
+        throws IOException, JsonParseException
+    {
+        if ((mNumTypesValid & NR_INT) != 0) {
+            mNumberLong = (long) mNumberInt;
+        } else if ((mNumTypesValid & NR_DOUBLE) != 0) {
+            // Need to check boundaries
+            if (mNumberDouble < MIN_LONG_D || mNumberDouble > MAX_LONG_D) {
+                reportOverflowLong();
+            }
+            mNumberLong = (long) mNumberDouble;
+        } else if ((mNumTypesValid & NR_BIGDECIMAL) != 0) {
+            if (BD_MIN_LONG.compareTo(mNumberBigDecimal) > 0 
+                || BD_MAX_LONG.compareTo(mNumberBigDecimal) < 0) {
+                reportOverflowLong();
+            }
+            mNumberLong = mNumberBigDecimal.longValue();
+        } else {
+            throwInternal(); // should never get here
+        }
+
+        mNumTypesValid |= NR_LONG;
+    }
+
+    protected void convertNumberToDouble()
+        throws IOException, JsonParseException
+    {
+        if ((mNumTypesValid & NR_INT) != 0) {
+            mNumberDouble = (double) mNumberInt;
+        } else if ((mNumTypesValid & NR_LONG) != 0) {
+            mNumberDouble = (double) mNumberLong;
+        } else if ((mNumTypesValid & NR_BIGDECIMAL) != 0) {
+            mNumberDouble = mNumberBigDecimal.doubleValue();
+        } else {
+            throwInternal(); // should never get here
+        }
+
+        mNumTypesValid |= NR_DOUBLE;
+    }
+
+    protected void convertNumberToBigDecimal()
+        throws IOException, JsonParseException
+    {
+        if ((mNumTypesValid & NR_INT) != 0) {
+            mNumberBigDecimal = BigDecimal.valueOf((long) mNumberInt);
+        } else if ((mNumTypesValid & NR_LONG) != 0) {
+            mNumberBigDecimal = BigDecimal.valueOf(mNumberLong);
+        } else {
+            /* Otherwise, let's actually parse from String representation,
+             * to avoid rounding errors that non-decimal floating operations
+             * would incur
+             */
+            mNumberBigDecimal = new BigDecimal(getText());
+        }
+        mNumTypesValid |= NR_BIGDECIMAL;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Exception reporting
+    ////////////////////////////////////////////////////
+     */
+
+    protected void reportUnexpectedNumberChar(int ch, String comment)
+        throws JsonParseException
+    {
+        String msg = "Unexpected character ("+getCharDesc(ch)+") in numeric value";
+        if (comment != null) {
+            msg += ": "+comment;
+        }
+        reportError(msg);
+    }
+
+    protected void reportInvalidNumber(String msg)
+        throws JsonParseException
+    {
+        reportError("Invalid numeric value: "+msg);
+    }
+
+
+    protected void reportOverflowInt()
+        throws IOException, JsonParseException
+    {
+        reportError("Numeric value ("+getText()+") out of range of int ("+Integer.MIN_VALUE+" - "+Integer.MAX_VALUE+")");
+    }
+
+    protected void reportOverflowLong()
+        throws IOException, JsonParseException
+    {
+        reportError("Numeric value ("+getText()+") out of range of long ("+Long.MIN_VALUE+" - "+Long.MAX_VALUE+")");
+    }
+
+}
diff --git a/src/java/org/codehaus/jackson/impl/JsonParserBase.java b/src/java/org/codehaus/jackson/impl/JsonParserBase.java
new file mode 100644
index 0000000..7129e0f
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/JsonParserBase.java
@@ -0,0 +1,524 @@
+package org.codehaus.jackson.impl;
+
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.io.IOContext;
+import org.codehaus.jackson.util.TextBuffer;
+
+/**
+ * Intermediate base class used by all Jackson {@link JsonParser}
+ * implementations. Contains most common things that are independent
+ * of actual underlying input source
+ *
+ * @author Tatu Saloranta
+ */
+public abstract class JsonParserBase
+    extends JsonParser
+{
+    // Control chars:
+    final static int INT_TAB = '\t';
+    final static int INT_LF = '\n';
+    final static int INT_CR = '\r';
+    final static int INT_SPACE = 0x0020;
+
+    // Markup
+    final static int INT_LBRACKET = '[';
+    final static int INT_RBRACKET = ']';
+    final static int INT_LCURLY = '{';
+    final static int INT_RCURLY = '}';
+    final static int INT_QUOTE = '"';
+    final static int INT_BACKSLASH = '\\';
+    final static int INT_SLASH = '/';
+
+    // Letters we need
+    final static int INT_b = 'b';
+    final static int INT_f = 'f';
+    final static int INT_n = 'n';
+    final static int INT_r = 'r';
+    final static int INT_t = 't';
+    final static int INT_u = 'u';
+
+    /*
+    ////////////////////////////////////////////////////
+    // Configuration
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * I/O context for this reader. It handles buffer allocation
+     * for the reader.
+     */
+    final protected IOContext mIOContext;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Current input data
+    ////////////////////////////////////////////////////
+     */
+
+    protected JsonToken mCurrToken;
+
+    // Note: type of actual buffer depends on sub-class, can't include
+
+    /**
+     * Pointer to next available character in buffer
+     */
+    protected int mInputPtr = 0;
+
+    /**
+     * Index of character after last available one in the buffer.
+     */
+    protected int mInputLast = 0;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Current input location information
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Number of characters that were contained in previous blocks
+     * (blocks that were already processed prior to the current buffer).
+     */
+    protected long mCurrInputProcessed = 0L;
+
+    /**
+     * Current row location of current point in input buffer, starting
+     * from 1
+     */
+    protected int mCurrInputRow = 1;
+
+    /**
+     * Current index of the first character of the current row in input
+     * buffer. Needed to calculate column position, if necessary; benefit
+     * of not having column itself is that this only has to be updated
+     * once per line.
+     */
+    protected int mCurrInputRowStart = 0;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Information about starting location of event
+    // Reader is pointing to; updated on-demand
+    ////////////////////////////////////////////////////
+     */
+
+    // // // Location info at point when current token was started
+
+    /**
+     * Total number of characters read before start of current token.
+     * Since big (gigabyte-sized) inputs are possible, this needs to be
+     * a long, unlike pointers and sizes related to in-memory buffers.
+     */
+    protected long mTokenInputTotal = 0; 
+
+    /**
+     * Input row on which current token starts, 1-based
+     */
+    protected int mTokenInputRow = 1;
+
+    /**
+     * Column on input row that current token starts; 0-based (although
+     * in the end it'll be converted to 1-based)
+     */
+    protected int mTokenInputCol = 0;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Parsing state
+    ////////////////////////////////////////////////////
+     */
+
+    protected JsonReadContext mParsingContext;
+
+    /**
+     * Flag that indicates that the current token has not yet
+     * been fully processed, and needs to be finished for
+     * some access (or skipped to obtain the next token)
+     */
+    protected boolean mTokenIncomplete = false;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Buffer(s) for local name(s) and text content
+    ////////////////////////////////////////////////////
+     */
+    /**
+     * Buffer that contains contents of String values, including
+     * field names if necessary (name split across boundary,
+     * contains escape sequence, or access needed to char array)
+     */
+    protected final TextBuffer mTextBuffer;
+
+    /**
+     * Flag set to indicate whether field name parsed is available
+     * from the text buffer or not.
+     */
+    protected boolean mFieldInBuffer = false;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////////////////
+     */
+
+    protected JsonParserBase(IOContext ctxt)
+    {
+        mIOContext = ctxt;
+        mTextBuffer = ctxt.constructTextBuffer();
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Abstract methods needed from sub-classes
+    ////////////////////////////////////////////////////
+     */
+
+    protected abstract void finishToken()
+        throws IOException, JsonParseException;
+
+    /*
+    ////////////////////////////////////////////////////
+    // JsonParser impl
+    ////////////////////////////////////////////////////
+     */
+
+    public abstract JsonToken nextToken()
+        throws IOException, JsonParseException;
+
+
+    /**
+     * @return Type of the token this parser currently points to,
+     *   if any: null both before any tokens have been read, and
+     *   after end-of-input has been encountered.
+     */
+    public JsonToken getCurrentToken()
+    {
+        return mCurrToken;
+    }
+
+    public boolean hasCurrentToken()
+    {
+        return mCurrToken != null;
+    }
+
+    /**
+     * Method that can be called to get the name associated with
+     * the current event. Will return null for all token types
+     * except for {@link JsonToken#FIELD_NAME}.
+     */
+    public String getCurrentName()
+        throws IOException, JsonParseException
+    {
+        return (mCurrToken == JsonToken.FIELD_NAME) ? mParsingContext.getCurrentName() : null;
+    }
+
+    public void close()
+        throws IOException
+    {
+        closeInput();
+        // Also, internal buffer(s) can now be released as well
+        releaseBuffers();
+    }
+
+    public JsonReadContext getParsingContext()
+    {
+        return mParsingContext;
+    }
+
+
+    /**
+     * Method that returns the <b>starting</b> location of the current
+     * token; that is, position of the first character from input
+     * that starts the current token.
+     */
+    public JsonLocation getTokenLocation()
+    {
+        return new JsonLocation(mIOContext.getSourceReference(),
+                                mTokenInputTotal,
+                                mTokenInputRow, mTokenInputCol + 1);
+    }
+
+    /**
+     * Method that returns location of the last processed character;
+     * usually for error reporting purposes
+     */
+    public JsonLocation getCurrentLocation()
+    {
+        return new JsonLocation(mIOContext.getSourceReference(),
+                                mCurrInputProcessed + mInputPtr - 1,
+                                mCurrInputRow, mInputPtr - mCurrInputRowStart);
+    }
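+
+    /* Worked example of the location arithmetic above: if earlier buffers
+     * totaling 8000 chars have been processed (mCurrInputProcessed == 8000),
+     * mInputPtr is 10, the current row is 3 and mCurrInputRowStart is 4,
+     * then the last processed character is at overall offset
+     * 8000 + 10 - 1 = 8009, row 3, column 10 - 4 = 6.
+     */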
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, access to token information, text
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method for accessing textual representation of the current event;
+     * if no current event (before first call to {@link #nextToken}, or
+     * after encountering end-of-input), returns null.
+     * Method can be called for any event.
+     */
+    public String getText()
+        throws IOException, JsonParseException
+    {
+        if (mTokenIncomplete) {
+            finishToken();
+        }
+        if (mCurrToken != null) { // null only before/after document
+            switch (mCurrToken) {
+            case FIELD_NAME:
+                return mParsingContext.getCurrentName();
+
+            case VALUE_STRING:
+            case VALUE_NUMBER_INT:
+            case VALUE_NUMBER_FLOAT:
+                return mTextBuffer.contentsAsString();
+                
+            default:
+                return mCurrToken.asString();
+            }
+        }
+        return null;
+    }
+
+    public char[] getTextCharacters()
+        throws IOException, JsonParseException
+    {
+        if (mTokenIncomplete) {
+            finishToken();
+        }
+        if (mCurrToken != null) { // null only before/after document
+            switch (mCurrToken) {
+                
+            case FIELD_NAME:
+                if (!mFieldInBuffer) {
+                    mTextBuffer.resetWithString(mParsingContext.getCurrentName());
+                    mFieldInBuffer = true;
+                }
+                return mTextBuffer.getTextBuffer();
+
+            case VALUE_STRING:
+            case VALUE_NUMBER_INT:
+            case VALUE_NUMBER_FLOAT:
+                return mTextBuffer.getTextBuffer();
+                
+            default:
+                return mCurrToken.asCharArray();
+            }
+        }
+        return null;
+    }
+
+    public int getTextLength()
+        throws IOException, JsonParseException
+    {
+        if (mTokenIncomplete) {
+            finishToken();
+        }
+        if (mCurrToken != null) { // null only before/after document
+            switch (mCurrToken) {
+                
+            case FIELD_NAME:
+                return mParsingContext.getCurrentName().length();
+            case VALUE_STRING:
+            case VALUE_NUMBER_INT:
+            case VALUE_NUMBER_FLOAT:
+                return mTextBuffer.size();
+                
+            default:
+                return mCurrToken.asCharArray().length;
+            }
+        }
+        return 0;
+    }
+
+    public int getTextOffset()
+        throws IOException, JsonParseException
+    {
+        if (mTokenIncomplete) {
+            finishToken();
+        }
+
+        // Most have offset of 0, only some may have other values:
+        if (mCurrToken != null) {
+            switch (mCurrToken) {
+            case FIELD_NAME:
+                return 0;
+            case VALUE_STRING:
+            case VALUE_NUMBER_INT:
+            case VALUE_NUMBER_FLOAT:
+                return mTextBuffer.getTextOffset();
+            }
+        }
+        return 0;
+    }
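+
+    /* Sketch of allocation-free text access using the methods above
+     * (assumes "jp" is a parser positioned on a VALUE_STRING token):
+     *
+     *   char[] buf = jp.getTextCharacters();
+     *   int start = jp.getTextOffset();
+     *   int len = jp.getTextLength();
+     *   // token text is buf[start] .. buf[start+len-1]; no String is built
+     */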
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public low-level accessors
+    ////////////////////////////////////////////////////
+     */
+
+    public final long getTokenCharacterOffset() { return mTokenInputTotal; }
+    public final int getTokenLineNr() { return mTokenInputRow; }
+    public final int getTokenColumnNr() { return mTokenInputCol; }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Low-level reading, linefeed handling
+    ////////////////////////////////////////////////////
+     */
+
+    protected final void skipCR()
+        throws IOException
+    {
+        if (mInputPtr < mInputLast || loadMore()) {
+            ++mInputPtr;
+        }
+        ++mCurrInputRow;
+        mCurrInputRowStart = mInputPtr;
+    }
+
+    protected final void skipLF()
+        throws IOException
+    {
+        ++mCurrInputRow;
+        mCurrInputRowStart = mInputPtr;
+    }
+
+    protected final void markLF() {
+        ++mCurrInputRow;
+        mCurrInputRowStart = mInputPtr;
+    }
+
+    protected final void markLF(int inputPtr) {
+        ++mCurrInputRow;
+        mCurrInputRowStart = inputPtr;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Low-level reading, other
+    ////////////////////////////////////////////////////
+     */
+
+    protected abstract boolean loadMore()
+        throws IOException;
+
+    protected abstract char getNextChar(String eofMsg)
+        throws IOException, JsonParseException;
+
+    protected abstract void closeInput()
+        throws IOException;
+
+    /**
+     * Method called to release internal buffers owned by the base
+     * reader. This may be called along with {@link #closeInput} (for
+     * example, when explicitly closing this reader instance), or
+     * separately (if need be).
+     */
+    protected void releaseBuffers()
+        throws IOException
+    {
+        mTextBuffer.releaseBuffers();
+    }
+
+    /**
+     * Method called when an EOF is encountered between tokens.
+     * It may be a legitimate EOF, but only if there
+     * is no open non-root context.
+     */
+    protected void handleEOF()
+        throws JsonParseException
+    {
+        if (!mParsingContext.isRoot()) {
+            reportInvalidEOF(": expected close marker for "+mParsingContext.getTypeDesc()+" (from "+mParsingContext.getStartLocation(mIOContext.getSourceReference())+")");
+        }
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Error reporting
+    ////////////////////////////////////////////////////
+     */
+
+    protected void reportUnexpectedChar(int ch, String comment)
+        throws JsonParseException
+    {
+        String msg = "Unexpected character ("+getCharDesc(ch)+")";
+        if (comment != null) {
+            msg += ": "+comment;
+        }
+        reportError(msg);
+    }
+
+    protected void reportInvalidEOF(String msg)
+        throws JsonParseException
+    {
+        reportError("Unexpected end-of-input"+msg);
+    }
+
+    protected void throwInvalidSpace(int i)
+        throws JsonParseException
+    {
+        char c = (char) i;
+        String msg = "Illegal character ("+getCharDesc(c)+"): only regular white space (\\r, \\n, \\t) is allowed between tokens";
+        reportError(msg);
+    }
+
+    protected void throwUnquotedSpace(int i, String ctxtDesc)
+        throws JsonParseException
+    {
+        char c = (char) i;
+        String msg = "Illegal unquoted character ("+getCharDesc(c)+"): has to be escaped using backslash to be included in "+ctxtDesc;
+        reportError(msg);
+    }
+
+    protected void reportMismatchedEndMarker(int actCh, char expCh)
+        throws JsonParseException
+    {
+        String startDesc = ""+mParsingContext.getStartLocation(mIOContext.getSourceReference());
+        reportError("Unexpected close marker '"+((char) actCh)+"': expected '"+expCh+"' (for "+mParsingContext.getTypeDesc()+" starting at "+startDesc+")");
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Error reporting, generic
+    ////////////////////////////////////////////////////
+     */
+
+    protected static String getCharDesc(int ch)
+    {
+        char c = (char) ch;
+        if (Character.isISOControl(c)) {
+            return "(CTRL-CHAR, code "+ch+")";
+        }
+        if (ch > 255) {
+            return "'"+c+"' (code "+ch+" / 0x"+Integer.toHexString(ch)+")";
+        }
+        return "'"+c+"' (code "+ch+")";
+    }
+
+    protected void reportError(String msg)
+        throws JsonParseException
+    {
+        throw new JsonParseException(msg, getCurrentLocation());
+    }
+
+    protected void wrapError(String msg, Throwable t)
+        throws JsonParseException
+    {
+        throw new JsonParseException(msg, getCurrentLocation(), t);
+    }
+
+    protected void throwInternal()
+    {
+        throw new RuntimeException("Internal error: this code path should never get executed");
+    }
+
+}
diff --git a/src/java/org/codehaus/jackson/impl/ReaderBasedNumericParser.java b/src/java/org/codehaus/jackson/impl/ReaderBasedNumericParser.java
new file mode 100644
index 0000000..1a02971
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/ReaderBasedNumericParser.java
@@ -0,0 +1,302 @@
+package org.codehaus.jackson.impl;
+
+import java.io.IOException;
+import java.io.Reader;
+
+import org.codehaus.jackson.io.IOContext;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.JsonToken;
+
+/**
+ * Intermediate class that implements handling of numeric parsing.
+ * Kept separate from the actual parser class just to isolate numeric
+ * parsing; aggregation would be nicer, but unfortunately many parts
+ * are hard to implement without direct access to the underlying
+ * buffers.
+ */
+public abstract class ReaderBasedNumericParser
+    extends ReaderBasedParserBase
+{
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////////////////
+     */
+
+    public ReaderBasedNumericParser(IOContext pc, Reader r)
+    {
+        super(pc, r);
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Textual parsing of number values
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Initial parsing method for number values. It needs to parse
+     * enough input to determine whether the value is a simple integer
+     * value, or a more generic decimal value, the latter of which needs
+     * to be expressed as a floating point number.
+     * The basic rule is that if the number
+     * has no fractional or exponential part, it is an integer; otherwise
+     * a floating point number.
+     *<p>
+     * Because much of input has to be processed in any case, no partial
+     * parsing is done: all input text will be stored for further
+     * processing. However, actual numeric value conversion will be
+     * deferred, since it is usually the most complicated and costliest
+     * part of processing.
+     */
+    protected final JsonToken parseNumberText(int ch)
+        throws IOException, JsonParseException
+    {
+        /* Although we will always be complete with respect to textual
+         * representation (that is, all characters will be parsed),
+         * actual conversion to a number is deferred. Thus, need to
+         * note that no representations are valid yet
+         */
+        boolean negative = (ch == INT_MINUS);
+        int ptr = mInputPtr;
+        int startPtr = ptr-1; // to include sign/digit already read
+        final int inputLen = mInputLast;
+
+        dummy_loop:
+        do { // dummy loop, to be able to break out
+            if (negative) { // need to read the next digit
+                if (ptr >= mInputLast) {
+                    break dummy_loop;
+                }
+                ch = mInputBuffer[ptr++];
+                // First check: must have a digit to follow minus sign
+                if (ch > INT_9 || ch < INT_0) {
+                    reportUnexpectedNumberChar(ch, "expected digit (0-9) to follow minus sign, for valid numeric value");
+                }
+                /* (note: has been checked for non-negative already, in
+                 * the dispatching code that determined it should be
+                 * a numeric value)
+                 */
+            }
+
+            /* First, let's see if the whole number is contained within
+             * the input buffer unsplit. This should be the common case;
+             * and to simplify processing, we will just reparse contents
+             * in the alternative case (number split on buffer boundary)
+             */
+            
+            int intLen = 1; // already got one
+            
+            // First let's get the obligatory integer part:
+            
+            int_loop:
+            while (true) {
+                if (ptr >= mInputLast) {
+                    break dummy_loop;
+                }
+                ch = (int) mInputBuffer[ptr++];
+                if (ch < INT_0 || ch > INT_9) {
+                    break int_loop;
+                }
+                // The only check: no leading zeroes
+                if (++intLen == 2) { // To ensure no leading zeroes
+                    if (mInputBuffer[ptr-2] == '0') {
+                        reportInvalidNumber("Leading zeroes not allowed");
+                    }
+                }
+            }
+
+            int fractLen = 0;
+            
+            // And then see if we get other parts
+            if (ch == INT_DECIMAL_POINT) { // yes, fraction
+                fract_loop:
+                while (true) {
+                    if (ptr >= inputLen) {
+                        break dummy_loop;
+                    }
+                    ch = (int) mInputBuffer[ptr++];
+                    if (ch < INT_0 || ch > INT_9) {
+                        break fract_loop;
+                    }
+                    ++fractLen;
+                }
+                // must be followed by sequence of ints, one minimum
+                if (fractLen == 0) {
+                    reportUnexpectedNumberChar(ch, "Decimal point not followed by a digit");
+                }
+            }
+
+            int expLen = 0;
+            if (ch == INT_e || ch == INT_E) { // and/or exponent
+                if (ptr >= inputLen) {
+                    break dummy_loop;
+                }
+                // Sign indicator?
+                ch = (int) mInputBuffer[ptr++];
+                if (ch == INT_MINUS || ch == INT_PLUS) { // yup, skip for now
+                    if (ptr >= inputLen) {
+                        break dummy_loop;
+                    }
+                    ch = (int) mInputBuffer[ptr++];
+                }
+                while (ch <= INT_9 && ch >= INT_0) {
+                    ++expLen;
+                    if (ptr >= inputLen) {
+                        break dummy_loop;
+                    }
+                    ch = (int) mInputBuffer[ptr++];
+                }
+                // must be followed by sequence of ints, one minimum
+                if (expLen == 0) {
+                    reportUnexpectedNumberChar(ch, "Exponent indicator not followed by a digit");
+                }
+            }
+
+            // Got it all: let's add to text buffer for parsing, access
+            --ptr; // need to push back following separator
+            mInputPtr = ptr;
+            int len = ptr-startPtr;
+            mTextBuffer.resetWithShared(mInputBuffer, startPtr, len);
+            return reset(negative, intLen, fractLen, expLen);
+        } while (false);
+
+        mInputPtr = negative ? (startPtr+1) : startPtr;
+        return parseNumberText2(negative);
+    }
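+
+    /* Worked example of the fast path above: with the input "-12.5e3,"
+     * fully contained in the buffer, the loops consume "-12.5e3", push
+     * back the trailing comma, and call reset(true, 2, 1, 1) (two integer
+     * digits, one fraction digit, one exponent digit), producing
+     * VALUE_NUMBER_FLOAT. If the buffer ends in the middle of the number,
+     * control falls through to parseNumberText2(), which re-reads the
+     * value character by character into the text buffer instead.
+     */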
+
+    /**
+     * Method called to parse a number, when the primary parse
+     * method has failed to parse it, due to it being split on
+     * buffer boundary. As a result code is very similar, except
+     * that it has to explicitly copy contents to the text buffer
+     * instead of just sharing the main input buffer.
+     */
+    private final JsonToken parseNumberText2(boolean negative)
+        throws IOException, JsonParseException
+    {
+        mTextBuffer.resetWithEmpty();
+        char[] outBuf = mTextBuffer.getCurrentSegment();
+        int outPtr = 0;
+
+        // Need to prepend sign?
+        if (negative) {
+            outBuf[outPtr++] = '-';
+        }
+
+        char c;
+        int intLen = 0;
+        boolean eof = false;
+
+        // Ok, first the obligatory integer part:
+        int_loop:
+        while (true) {
+            if (mInputPtr >= mInputLast && !loadMore()) {
+                // EOF is legal for main level int values
+                c = CHAR_NULL;
+                eof = true;
+                break int_loop;
+            }
+            c = mInputBuffer[mInputPtr++];
+            if (c < INT_0 || c > INT_9) {
+                break int_loop;
+            }
+            ++intLen;
+            // Quickie check: no leading zeroes allowed
+            if (intLen == 2) {
+                if (outBuf[outPtr-1] == '0') {
+                    reportInvalidNumber("Leading zeroes not allowed");
+                }
+            }
+            if (outPtr >= outBuf.length) {
+                outBuf = mTextBuffer.finishCurrentSegment();
+                outPtr = 0;
+            }
+            outBuf[outPtr++] = c;
+        }
+        // Also, integer part is not optional
+        if (intLen == 0) {
+            reportInvalidNumber("Missing integer part (next char "+getCharDesc(c)+")");
+        }
+
+        int fractLen = 0;
+        // And then see if we get other parts
+        if (c == '.') { // yes, fraction
+            outBuf[outPtr++] = c;
+
+            fract_loop:
+            while (true) {
+                if (mInputPtr >= mInputLast && !loadMore()) {
+                    eof = true;
+                    break fract_loop;
+                }
+                c = mInputBuffer[mInputPtr++];
+                if (c < INT_0 || c > INT_9) {
+                    break fract_loop;
+                }
+                ++fractLen;
+                if (outPtr >= outBuf.length) {
+                    outBuf = mTextBuffer.finishCurrentSegment();
+                    outPtr = 0;
+                }
+                outBuf[outPtr++] = c;
+            }
+            // must be followed by sequence of ints, one minimum
+            if (fractLen == 0) {
+                reportUnexpectedNumberChar(c, "Decimal point not followed by a digit");
+            }
+        }
+
+        int expLen = 0;
+        if (c == 'e' || c == 'E') { // exponent?
+            if (outPtr >= outBuf.length) {
+                outBuf = mTextBuffer.finishCurrentSegment();
+                outPtr = 0;
+            }
+            outBuf[outPtr++] = c;
+            // Not optional, can require that we get one more char
+            c = (mInputPtr < mInputLast) ? mInputBuffer[mInputPtr++]
+                : getNextChar("expected a digit for number exponent");
+            // Sign indicator?
+            if (c == '-' || c == '+') {
+                if (outPtr >= outBuf.length) {
+                    outBuf = mTextBuffer.finishCurrentSegment();
+                    outPtr = 0;
+                }
+                outBuf[outPtr++] = c;
+                // Likewise, not optional:
+                c = (mInputPtr < mInputLast) ? mInputBuffer[mInputPtr++]
+                    : getNextChar("expected a digit for number exponent");
+            }
+
+            exp_loop:
+            while (c <= INT_9 && c >= INT_0) {
+                ++expLen;
+                if (outPtr >= outBuf.length) {
+                    outBuf = mTextBuffer.finishCurrentSegment();
+                    outPtr = 0;
+                }
+                outBuf[outPtr++] = c;
+                if (mInputPtr >= mInputLast && !loadMore()) {
+                    eof = true;
+                    break exp_loop;
+                }
+                c = mInputBuffer[mInputPtr++];
+            }
+            // must be followed by sequence of ints, one minimum
+            if (expLen == 0) {
+                reportUnexpectedNumberChar(c, "Exponent indicator not followed by a digit");
+            }
+        }
+
+        // Ok; unless we hit end-of-input, need to push last char read back
+        if (!eof) {
+            --mInputPtr;
+        }
+        mTextBuffer.setCurrentLength(outPtr);
+
+        // And there we have it!
+        return reset(negative, intLen, fractLen, expLen);
+    }
+
+}
diff --git a/src/java/org/codehaus/jackson/impl/ReaderBasedParser.java b/src/java/org/codehaus/jackson/impl/ReaderBasedParser.java
new file mode 100644
index 0000000..26aa8a1
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/ReaderBasedParser.java
@@ -0,0 +1,575 @@
+package org.codehaus.jackson.impl;
+
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.io.IOContext;
+import org.codehaus.jackson.util.*;
+import static org.codehaus.jackson.JsonReadContext.*;
+
+/**
+ * This is a concrete implementation of {@link JsonParser}, which is
+ * based on a {@link java.io.Reader} to handle low-level character
+ * conversion tasks.
+ */
+public final class ReaderBasedParser
+    extends ReaderBasedNumericParser
+{
+
+    /*
+    ////////////////////////////////////////////////////
+    // Configuration
+    ////////////////////////////////////////////////////
+     */
+
+    final protected SymbolTable mSymbols;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////////////////
+     */
+
+    public ReaderBasedParser(IOContext ioCtxt, Reader r, SymbolTable st)
+    {
+        super(ioCtxt, r);
+        mSymbols = st;
+        mParsingContext = JsonReadContext.createRootContext(this);
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, traversal
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * @return Next token from the stream, if any found, or null
+     *   to indicate end-of-input
+     */
+    public JsonToken nextToken()
+        throws IOException, JsonParseException
+    {
+        if (mTokenIncomplete) {
+            skipPartial();
+        }
+
+        int i;
+
+        // Space to skip?
+        while (true) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    handleEOF();
+                    return (mCurrToken = null);
+                }
+            }
+            i = (int) mInputBuffer[mInputPtr++];
+            if (i > INT_SPACE) {
+                break;
+            }
+            if (i != INT_SPACE) {
+                if (i == INT_LF) {
+                    skipLF();
+                } else if (i == INT_CR) {
+                    skipCR();
+                } else if (i != INT_TAB) {
+                    throwInvalidSpace(i);
+                }
+            }
+        }
+
+        /* First, need to ensure we know the starting location of token
+         * after skipping leading white space
+         */
+        mTokenInputTotal = mCurrInputProcessed + mInputPtr - 1;
+        mTokenInputRow = mCurrInputRow;
+        mTokenInputCol = mInputPtr - mCurrInputRowStart - 1;
+
+        // Closing scope?
+        if (i == INT_RBRACKET) {
+            if (!mParsingContext.isArray()) {
+                reportMismatchedEndMarker(i, ']');
+            }
+            mParsingContext = mParsingContext.getParent();
+            return (mCurrToken = JsonToken.END_ARRAY);
+        }
+        if (i == INT_RCURLY) {
+            if (!mParsingContext.isObject()) {
+                reportMismatchedEndMarker(i, '}');
+            }
+            mParsingContext = mParsingContext.getParent();
+            return (mCurrToken = JsonToken.END_OBJECT);
+        }
+
+        // Nope. Have and/or need a separator?
+        int sep = mParsingContext.handleSeparator(i);
+
+        switch (sep) {
+        case HANDLED_EXPECT_NAME:
+        case HANDLED_EXPECT_VALUE:
+            // Need to skip space, find next char
+            while (true) {
+                if (mInputPtr >= mInputLast) {
+                    if (!loadMore()) {
+                        reportError("Unexpected end-of-input within/between "+mParsingContext.getTypeDesc()+" entries");
+                    }
+                }
+                i = (int) mInputBuffer[mInputPtr++];
+                if (i > INT_SPACE) {
+                    break;
+                }
+                if (i != INT_SPACE) {
+                    if (i == INT_LF) {
+                        skipLF();
+                    } else if (i == INT_CR) {
+                        skipCR();
+                    } else if (i != INT_TAB) {
+                        throwInvalidSpace(i);
+                    }
+                }
+            }
+            // And if we expect a name, must be quote
+            if (sep == HANDLED_EXPECT_NAME) {
+                return handleFieldName(i);
+            }
+            break;
+        case MISSING_COMMA:
+            reportUnexpectedChar(i, "was expecting comma to separate "+mParsingContext.getTypeDesc()+" entries");
+        case MISSING_COLON:
+            reportUnexpectedChar(i, "was expecting colon to separate field name and value");
+        case NOT_EXP_SEPARATOR_NEED_VALUE:
+            break;
+        case NOT_EXP_SEPARATOR_NEED_NAME:
+            return handleFieldName(i);
+        }
+
+        // We now have the first char: what did we get?
+        switch (i) {
+        case INT_QUOTE:
+            return startString();
+        case INT_LBRACKET:
+            mParsingContext = mParsingContext.createChildArrayContext(this);
+            return (mCurrToken = JsonToken.START_ARRAY);
+        case INT_LCURLY:
+            mParsingContext = mParsingContext.createChildObjectContext(this);
+            return (mCurrToken = JsonToken.START_OBJECT);
+        case INT_RBRACKET:
+        case INT_RCURLY:
+            // Error: neither is valid at this point; valid closers have
+            // been handled earlier
+            reportUnexpectedChar(i, "expected a value");
+        case INT_t:
+            return matchToken(JsonToken.VALUE_TRUE);
+        case INT_f:
+            return matchToken(JsonToken.VALUE_FALSE);
+        case INT_n:
+            return matchToken(JsonToken.VALUE_NULL);
+
+        case INT_MINUS:
+            /* Should we have separate handling for plus? Although
+             * it is not allowed per se, it may be erroneously used,
+             * and could be indicated by a more specific error message.
+             */
+        case INT_0:
+        case INT_1:
+        case INT_2:
+        case INT_3:
+        case INT_4:
+        case INT_5:
+        case INT_6:
+        case INT_7:
+        case INT_8:
+        case INT_9:
+            return parseNumberText(i);
+        }
+
+        reportUnexpectedChar(i, "expected a valid value (number, String, array, object, 'true', 'false' or 'null')");
+        return null; // never gets here
+    }
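+
+    /* A hedged end-to-end sketch (assumes the JsonFactory entry point and
+     * that the accessors used below are exposed via JsonParser; "r" is a
+     * Reader over the document {"id":13}):
+     *
+     *   JsonParser jp = new JsonFactory().createJsonParser(r);
+     *   while (jp.nextToken() != null) {
+     *       if (jp.getCurrentToken() == JsonToken.FIELD_NAME
+     *           && "id".equals(jp.getCurrentName())) {
+     *           jp.nextToken();            // advance to the value
+     *           int id = jp.getIntValue(); // 13
+     *       }
+     *   }
+     *   jp.close();
+     *
+     * Token sequence for that document: START_OBJECT, FIELD_NAME,
+     * VALUE_NUMBER_INT, END_OBJECT, then null at end-of-input.
+     */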
+
+    @Override
+    public void close()
+        throws IOException
+    {
+        super.close();
+        mSymbols.release();
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Internal methods, secondary parsing
+    ////////////////////////////////////////////////////
+     */
+
+    protected JsonToken handleFieldName(int i)
+        throws IOException, JsonParseException
+    {
+        if (i != INT_QUOTE) {
+            reportUnexpectedChar(i, "was expecting double-quote to start field name");
+        }
+        mFieldInBuffer = false; // by default, assume the name will not end up in the text buffer
+
+        /* First: let's try to see if we have a simple name: one that does
+         * not cross input buffer boundary, and does not contain escape
+         * sequences.
+         */
+        int ptr = mInputPtr;
+        int hash = 0;
+        final int inputLen = mInputLast;
+
+        if (ptr < inputLen) {
+            final int[] codes = CharTypes.getInputCode();
+            final int maxCode = codes.length;
+
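+            // codes[] is non-zero for characters that end this fast path:
+            // the closing quote, backslash escapes and anything else that
+            // needs special handling.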
+            do {
+                int ch = mInputBuffer[ptr];
+                if (ch < maxCode && codes[ch] != 0) {
+                    if (ch == '"') {
+                        int start = mInputPtr;
+                        mInputPtr = ptr+1; // to skip the quote
+                        String name = mSymbols.findSymbol(mInputBuffer, start, ptr - start, hash);
+                        mParsingContext.setCurrentName(name);
+                        return (mCurrToken = JsonToken.FIELD_NAME);
+                    }
+                    break;
+                }
+                hash = (hash * 31) + ch;
+                ++ptr;
+            } while (ptr < inputLen);
+        }
+
+        int start = mInputPtr;
+        mInputPtr = ptr;
+        return handleFieldName2(start, hash);
+    }
+
+    private JsonToken handleFieldName2(int startPtr, int hash)
+        throws IOException, JsonParseException
+    {
+        mTextBuffer.resetWithShared(mInputBuffer, startPtr, (mInputPtr - startPtr));
+
+        /* Output pointers; calls will also ensure that the buffer is
+         * not shared and has room for at least one more char.
+         */
+        char[] outBuf = mTextBuffer.getCurrentSegment();
+        int outPtr = mTextBuffer.getCurrentSegmentSize();
+
+        while (true) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    reportInvalidEOF(": was expecting closing quote for name");
+                }
+            }
+            char c = mInputBuffer[mInputPtr++];
+            int i = (int) c;
+            if (i <= INT_BACKSLASH) {
+                if (i == INT_BACKSLASH) {
+                    /* Although chars outside of BMP are to be escaped as
+                     * an UTF-16 surrogate pair, does that affect decoding?
+                     * For now let's assume it does not.
+                     */
+                    c = decodeEscaped();
+                } else if (i <= INT_QUOTE) {
+                    if (i == INT_QUOTE) {
+                        break;
+                    }
+                    if (i < INT_SPACE) {
+                        throwUnquotedSpace(i, "name");
+                    }
+                }
+            }
+            hash = (hash * 31) + i;
+            // Ok, let's add char to output:
+            outBuf[outPtr++] = c;
+
+            // Need more room?
+            if (outPtr >= outBuf.length) {
+                outBuf = mTextBuffer.finishCurrentSegment();
+                outPtr = 0;
+            }
+        }
+        mTextBuffer.setCurrentLength(outPtr);
+        {
+            mFieldInBuffer = true; // yep, is now stored in text buffer
+            TextBuffer tb = mTextBuffer;
+            char[] buf = tb.getTextBuffer();
+            int start = tb.getTextOffset();
+            int len = tb.size();
+
+            mParsingContext.setCurrentName(mSymbols.findSymbol(buf, start, len, hash));
+        }
+        return (mCurrToken = JsonToken.FIELD_NAME);
+    }
+
+    protected JsonToken startString()
+        throws IOException, JsonParseException
+    {
+        /* First: let's try to see if we have simple String value: one
+         * that does not cross input buffer boundary, and does not
+         * contain escape sequences.
+         */
+        int ptr = mInputPtr;
+        final int inputLen = mInputLast;
+
+        if (ptr < inputLen) {
+            final int[] codes = CharTypes.getInputCode();
+            final int maxCode = codes.length;
+
+            do {
+                int ch = mInputBuffer[ptr];
+                if (ch < maxCode && codes[ch] != 0) {
+                    if (ch == '"') {
+                        mTextBuffer.resetWithShared(mInputBuffer, mInputPtr, (ptr-mInputPtr));
+                        mInputPtr = ptr+1;
+                        return (mCurrToken = JsonToken.VALUE_STRING);
+                    }
+                    break;
+                }
+                ++ptr;
+            } while (ptr < inputLen);
+        }
+
+        /* Nope: either ran out of input, or bumped into an escape
+         * sequence. Either way, let's defer further parsing to ensure
+         * String value is actually needed.
+         */
+        //int start = mInputPtr;
+        mTextBuffer.resetWithShared(mInputBuffer, mInputPtr, (ptr-mInputPtr));
+        mInputPtr = ptr;
+        mTokenIncomplete = true;
+        return (mCurrToken = JsonToken.VALUE_STRING);
+    }
+
+    protected void finishString()
+        throws IOException, JsonParseException
+    {
+        /* Output pointers; calls will also ensure that the buffer is
+         * not shared and has room for at least one more char.
+         */
+        char[] outBuf = mTextBuffer.getCurrentSegment();
+        int outPtr = mTextBuffer.getCurrentSegmentSize();
+
+        while (true) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    reportInvalidEOF(": was expecting closing quote for a string value");
+                }
+            }
+            char c = mInputBuffer[mInputPtr++];
+            int i = (int) c;
+            if (i <= INT_BACKSLASH) {
+                if (i == INT_BACKSLASH) {
+                    /* Although chars outside of BMP are to be escaped as
+                     * an UTF-16 surrogate pair, does that affect decoding?
+                     * For now let's assume it does not.
+                     */
+                    c = decodeEscaped();
+                } else if (i <= INT_QUOTE) {
+                    if (i == INT_QUOTE) {
+                        break;
+                    }
+                    if (i < INT_SPACE) {
+                        throwUnquotedSpace(i, "string value");
+                    }
+                }
+            }
+            // Need more room?
+            if (outPtr >= outBuf.length) {
+                outBuf = mTextBuffer.finishCurrentSegment();
+                outPtr = 0;
+            }
+            // Ok, let's add char to output:
+            outBuf[outPtr++] = c;
+        }
+        mTextBuffer.setCurrentLength(outPtr);
+    }
+
+    /**
+     * Method called to skim through the rest of an unparsed String value
+     * when it is not needed. This can be done a bit faster since the
+     * contents need not be stored for future access.
+     */
+    protected void skipString()
+        throws IOException, JsonParseException
+    {
+        int inputPtr = mInputPtr;
+        int inputLen = mInputLast;
+        char[] inputBuffer = mInputBuffer;
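+        // Work on local copies for speed; mInputPtr is synced back before
+        // any call that may refill the input buffer.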
+
+        while (true) {
+            if (inputPtr >= inputLen) {
+                mInputPtr = inputPtr;
+                if (!loadMore()) {
+                    reportInvalidEOF(": was expecting closing quote for a string value");
+                }
+                inputPtr = mInputPtr;
+                inputLen = mInputLast;
+            }
+            char c = inputBuffer[inputPtr++];
+            int i = (int) c;
+            if (i <= INT_BACKSLASH) {
+                if (i == INT_BACKSLASH) {
+                    /* Although chars outside of BMP are to be escaped as
+                     * an UTF-16 surrogate pair, does that affect decoding?
+                     * For now let's assume it does not.
+                     */
+                    mInputPtr = inputPtr;
+                    c = decodeEscaped();
+                    inputPtr = mInputPtr;
+                    inputLen = mInputLast;
+                } else if (i <= INT_QUOTE) {
+                    if (i == INT_QUOTE) {
+                        mInputPtr = inputPtr;
+                        break;
+                    }
+                    if (i < INT_SPACE) {
+                        mInputPtr = inputPtr;
+                        throwUnquotedSpace(i, "string value");
+                    }
+                }
+            }
+        }
+    }
+
+    protected JsonToken matchToken(JsonToken token)
+        throws IOException, JsonParseException
+    {
+        // First char is already matched, need to check the rest
+        String matchStr = token.asString();
+        int i = 1;
+
+        for (int len = matchStr.length(); i < len; ++i) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    reportInvalidEOF(" in a value");
+                }
+            }
+            char c = mInputBuffer[mInputPtr];
+            if (c != matchStr.charAt(i)) {
+                reportInvalidToken(matchStr.substring(0, i));
+            }
+            ++mInputPtr;
+        }
+        /* Ok, fine; let's not bother checking anything beyond keyword.
+         * If there's something wrong there, it'll cause a parsing
+         * error later on.
+         */
+        return (mCurrToken = token);
+    }
+
+    private void reportInvalidToken(String matchedPart)
+        throws IOException, JsonParseException
+    {
+        StringBuilder sb = new StringBuilder(matchedPart);
+        /* Let's just try to find what appears to be the token, using
+         * regular Java identifier character rules. It's just a heuristic,
+         * nothing fancy here.
+         */
+        while (true) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    break;
+                }
+            }
+            char c = mInputBuffer[mInputPtr];
+            if (!Character.isJavaIdentifierPart(c)) {
+                break;
+            }
+            ++mInputPtr;
+            sb.append(c);
+        }
+
+        reportError("Unrecognized token '"+sb.toString()+"': was expecting 'null', 'true' or 'false'");
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Internal methods, other parsing
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method called to process and skip remaining contents of a
+     * partially read token.
+     */
+    protected final void skipPartial()
+        throws IOException, JsonParseException
+    {
+        mTokenIncomplete = false;
+        if (mCurrToken == JsonToken.VALUE_STRING) {
+            skipString();
+        } else {
+            throwInternal();
+        }
+    }
+
+    /**
+     * Method called to finish parsing of a partially parsed token,
+     * in order to access information regarding it.
+     */
+    protected final void finishToken()
+        throws IOException, JsonParseException
+    {
+        mTokenIncomplete = false;
+        if (mCurrToken == JsonToken.VALUE_STRING) {
+            finishString();
+        } else {
+            throwInternal();
+        }
+    }
+
+    protected final char decodeEscaped()
+        throws IOException, JsonParseException
+    {
+        if (mInputPtr >= mInputLast) {
+            if (!loadMore()) {
+                reportInvalidEOF(" in character escape sequence");
+            }
+        }
+        char c = mInputBuffer[mInputPtr++];
+
+        switch ((int) c) {
+            // First, ones that are mapped
+        case INT_b:
+            return '\b';
+        case INT_t:
+            return '\t';
+        case INT_n:
+            return '\n';
+        case INT_f:
+            return '\f';
+        case INT_r:
+            return '\r';
+
+            // And these are to be returned as they are
+        case INT_QUOTE:
+        case INT_SLASH:
+        case INT_BACKSLASH:
+            return c;
+
+        case INT_u: // and finally hex-escaped
+            break;
+
+        default:
+            reportError("Unrecognized character escape "+getCharDesc(c));
+        }
+
+        // Ok, a hex escape. Need 4 characters
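+        // (for example, the digits "0041" accumulate to 0x0041, i.e. 'A')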
+        int value = 0;
+        for (int i = 0; i < 4; ++i) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    reportInvalidEOF(" in character escape sequence");
+                }
+            }
+            int ch = (int) mInputBuffer[mInputPtr++];
+            int digit = CharTypes.charToHex(ch);
+            if (digit < 0) {
+                reportUnexpectedChar(ch, "expected a hex-digit for character escape sequence");
+            }
+            value = (value << 4) | digit;
+        }
+        return (char) value;
+    }
+}
diff --git a/src/java/org/codehaus/jackson/impl/ReaderBasedParserBase.java b/src/java/org/codehaus/jackson/impl/ReaderBasedParserBase.java
new file mode 100644
index 0000000..aea15c7
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/ReaderBasedParserBase.java
@@ -0,0 +1,131 @@
+package org.codehaus.jackson.impl;
+
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.io.IOContext;
+import org.codehaus.jackson.util.TextBuffer;
+
+/**
+ * This is a simple low-level input reader base class, used by
+ * JSON parser.
+ * The reason for sub-classing (over composition)
+ * is due to need for direct access to character buffers
+ * and positions.
+ *
+ * @author Tatu Saloranta
+ */
+public abstract class ReaderBasedParserBase
+    extends JsonNumericParserBase
+{
+    /*
+    ////////////////////////////////////////////////////
+    // Configuration
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Reader that can be used for reading more content, if one
+     * is in use. May be null if the input comes as a fully pre-loaded
+     * buffer, or if the reader has been closed.
+     */
+    protected Reader mReader;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Current input data
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Current buffer from which data is read; generally data is read into
+     * buffer from input source.
+     */
+    protected char[] mInputBuffer;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////////////////
+     */
+
+    protected ReaderBasedParserBase(IOContext ctxt, Reader r)
+    {
+        super(ctxt);
+        mReader = r;
+        mInputBuffer = ctxt.allocTokenBuffer();
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Low-level reading, other
+    ////////////////////////////////////////////////////
+     */
+
+    protected final boolean loadMore()
+        throws IOException
+    {
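+        /* The buffer is about to be reloaded from the start, so fold the
+         * characters consumed so far into the running offsets; this keeps
+         * location reporting correct across buffer reloads.
+         */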
+        mCurrInputProcessed += mInputLast;
+        mCurrInputRowStart -= mInputLast;
+
+        if (mReader != null) {
+            int count = mReader.read(mInputBuffer, 0, mInputBuffer.length);
+            if (count > 0) {
+                mInputPtr = 0;
+                mInputLast = count;
+                return true;
+            }
+            // End of input
+            closeInput();
+            // Should never return 0, so let's fail
+            if (count == 0) {
+                throw new IOException("Reader returned 0 characters when trying to read "+mInputLast);
+            }
+        }
+        return false;
+    }
+
+
+    protected char getNextChar(String eofMsg)
+        throws IOException, JsonParseException
+    {
+        if (mInputPtr >= mInputLast) {
+            if (!loadMore()) {
+                reportInvalidEOF(eofMsg);
+            }
+        }
+        return mInputBuffer[mInputPtr++];
+    }
+
+    protected void closeInput()
+        throws IOException
+    {
+        Reader r = mReader;
+        if (r != null) {
+            mReader = null;
+            /* Reader takes care of returning buffers it uses. Likewise,
+             * we need to take care of returning temporary buffers
+             * we have allocated.
+             */
+            r.close();
+        }
+    }
+
+    /**
+     * Method called to release internal buffers owned by the base
+     * reader. This may be called along with {@link #closeInput} (for
+     * example, when explicitly closing this reader instance), or
+     * separately (if need be).
+     */
+    @Override
+    protected void releaseBuffers()
+        throws IOException
+    {
+        super.releaseBuffers();
+        char[] buf = mInputBuffer;
+        if (buf != null) {
+            mInputBuffer = null;
+            mIOContext.releaseTokenBuffer(buf);
+        }
+    }
+}
diff --git a/src/java/org/codehaus/jackson/impl/StreamBasedParserBase.java b/src/java/org/codehaus/jackson/impl/StreamBasedParserBase.java
new file mode 100644
index 0000000..d8a9cbd
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/StreamBasedParserBase.java
@@ -0,0 +1,144 @@
+package org.codehaus.jackson.impl;
+
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.io.IOContext;
+import org.codehaus.jackson.util.TextBuffer;
+
+/**
+ * This is a simple low-level input reader base class, used by
+ * the JSON parser when the underlying input source is
+ * a byte stream such as {@link InputStream}.
+ * The reason for sub-classing (over composition)
+ * is the need for direct access to low-level byte buffers
+ * and positions.
+ *
+ * @author Tatu Saloranta
+ */
+public abstract class StreamBasedParserBase
+    extends JsonNumericParserBase
+{
+    /*
+    ////////////////////////////////////////////////////
+    // Configuration
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Input stream that can be used for reading more content, if one
+     * is in use. May be null if the input comes just as a full buffer,
+     * or if the stream has been closed.
+     */
+    protected InputStream mInputStream;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Current input data
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Current buffer from which data is read; generally data is read into
+     * buffer from input source, but in some cases pre-loaded buffer
+     * is handed to the parser.
+     */
+    protected byte[] mInputBuffer;
+
+    /**
+     * Flag that indicates whether the input buffer is recyclable (and
+     * needs to be returned to recycler once we are done) or not.
+     */
+    protected boolean mBufferRecyclable;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////////////////
+     */
+
+    protected StreamBasedParserBase(IOContext ctxt, InputStream in,
+                                    byte[] inputBuffer, int start, int end,
+                                    boolean bufferRecyclable)
+    {
+        super(ctxt);
+        mInputStream = in;
+        mInputBuffer = inputBuffer;
+        mInputPtr = start;
+        mInputLast = end;
+        mBufferRecyclable = bufferRecyclable;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Low-level reading, other
+    ////////////////////////////////////////////////////
+     */
+
+    protected final boolean loadMore()
+        throws IOException
+    {
+        mCurrInputProcessed += mInputLast;
+        mCurrInputRowStart -= mInputLast;
+
+        if (mInputStream != null) {
+            int count = mInputStream.read(mInputBuffer, 0, mInputBuffer.length);
+            if (count > 0) {
+                mInputPtr = 0;
+                mInputLast = count;
+                return true;
+            }
+            // End of input
+            closeInput();
+            // Should never return 0, so let's fail
+            if (count == 0) {
+                throw new IOException("Reader returned 0 characters when trying to read "+mInputLast);
+            }
+        }
+        return false;
+    }
+
+
+    protected char getNextChar(String eofMsg)
+        throws IOException, JsonParseException
+    {
+        if (mInputPtr >= mInputLast) {
+            if (!loadMore()) {
+                reportInvalidEOF(eofMsg);
+            }
+        }
+        // !!! TBI
+        //return mInputBuffer[mInputPtr++];
+        return ' ';
+    }
+
+    protected void closeInput()
+        throws IOException
+    {
+        InputStream in = mInputStream;
+        if (in != null) {
+            mInputStream = null;
+            in.close();
+        }
+    }
+
+    /**
+     * Method called to release internal buffers owned by the base
+     * reader. This may be called along with {@link #closeInput} (for
+     * example, when explicitly closing this reader instance), or
+     * separately (if need be).
+     */
+    @Override
+    protected void releaseBuffers()
+        throws IOException
+    {
+        super.releaseBuffers();
+        if (mBufferRecyclable) {
+            byte[] buf = mInputBuffer;
+            if (buf != null) {
+                mInputBuffer = null;
+                mIOContext.releaseReadIOBuffer(buf);
+            }
+        }
+    }
+}
diff --git a/src/java/org/codehaus/jackson/impl/Utf8NumericParser.java b/src/java/org/codehaus/jackson/impl/Utf8NumericParser.java
new file mode 100644
index 0000000..1f05c20
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/Utf8NumericParser.java
@@ -0,0 +1,74 @@
+package org.codehaus.jackson.impl;
+
+import java.io.*;
+
+import org.codehaus.jackson.io.IOContext;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.JsonToken;
+
+/**
+ * Intermediate class that implements handling of numeric parsing,
+ * when using UTF-8 encoded byte-based input source.
+ * Separate from the actual parser class just to isolate numeric
+ * parsing: would be nice to use aggregation, but unfortunately
+ * many parts are hard to implement without direct access to
+ * underlying buffers.
+ */
+public abstract class Utf8NumericParser
+    extends StreamBasedParserBase
+{
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////////////////
+     */
+
+    public Utf8NumericParser(IOContext pc, InputStream in,
+                             byte[] inputBuffer, int start, int end,
+                             boolean bufferRecyclable)
+    {
+        super(pc, in, inputBuffer, start, end, bufferRecyclable);
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Textual parsing of number values
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Initial parsing method for number values. It needs to parse
+     * enough input to determine whether the value is to be considered
+     * a simple integer value, or a more generic decimal value, the
+     * latter of which needs to be expressed
+     * as a floating point number. The basic rule is that if the number
+     * has no fractional or exponential part, it is an integer; otherwise
+     * a floating point number.
+     *<p>
+     * Because much of input has to be processed in any case, no partial
+     * parsing is done: all input text will be stored for further
+     * processing. However, actual numeric value conversion will be
+     * deferred, since it is usually the most complicated and costliest
+     * part of processing.
+     */
+    protected final JsonToken parseNumberText(int ch)
+        throws IOException, JsonParseException
+    {
+        // !!! TBI
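+        // (Per the rules above, e.g. "0" and "-12" would be integers, "0.25" and "1e6" floating point values.)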
+        return null;
+    }
+
+    /**
+     * Method called to parse a number, when the primary parse
+     * method has failed to parse it, due to it being split on
+     * buffer boundary. As a result the code is very similar, except
+     * that it has to explicitly copy contents to the text buffer
+     * instead of just sharing the main input buffer.
+     */
+    private final JsonToken parseNumberText2(boolean negative)
+        throws IOException, JsonParseException
+    {
+        // !!! TBI
+        return null;
+    }
+}
diff --git a/src/java/org/codehaus/jackson/impl/Utf8StreamParser.java b/src/java/org/codehaus/jackson/impl/Utf8StreamParser.java
new file mode 100644
index 0000000..ea41672
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/Utf8StreamParser.java
@@ -0,0 +1,598 @@
+package org.codehaus.jackson.impl;
+
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.io.IOContext;
+import org.codehaus.jackson.sym.*;
+import org.codehaus.jackson.util.*;
+import static org.codehaus.jackson.JsonReadContext.*;
+
+/**
+ * This is a concrete implementation of {@link JsonParser}, which is
+ * based on a {@link java.io.InputStream} as the input source.
+ */
+public final class Utf8StreamParser
+    extends Utf8NumericParser
+{
+    /*
+    ////////////////////////////////////////////////////
+    // Configuration
+    ////////////////////////////////////////////////////
+    */
+
+    final protected SymbolTable mSymbols;
+    //final protected NameCanonicalizer mSymbols;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////////////////
+     */
+
+    public Utf8StreamParser(IOContext ctxt, InputStream in,
+                            byte[] inputBuffer, int start, int end,
+                            boolean bufferRecyclable)
+    {
+        super(ctxt, in, inputBuffer, start, end, bufferRecyclable);
+        // !!! TBI
+        mSymbols = null;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, traversal
+    ////////////////////////////////////////////////////
+     */
+
+    /**
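+     * Main iteration method: typically called in a loop, for example
+     * <code>while ((t = parser.nextToken()) != null) { ... }</code>,
+     * until end-of-input is reached.
+     *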
+     * @return Next token from the stream, if any found, or null
+     *   to indicate end-of-input
+     */
+    public JsonToken nextToken()
+        throws IOException, JsonParseException
+    {
+        if (mTokenIncomplete) {
+            skipPartial();
+        }
+
+        int i;
+
+        // Space to skip?
+        while (true) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    handleEOF();
+                    return (mCurrToken = null);
+                }
+            }
+            i = (int) mInputBuffer[mInputPtr++];
+            if (i > INT_SPACE) {
+                break;
+            }
+            if (i != INT_SPACE) {
+                if (i == INT_LF) {
+                    skipLF();
+                } else if (i == INT_CR) {
+                    skipCR();
+                } else if (i != INT_TAB) {
+                    throwInvalidSpace(i);
+                }
+            }
+        }
+
+        /* First, need to ensure we know the starting location of token
+         * after skipping leading white space
+         */
+        mTokenInputTotal = mCurrInputProcessed + mInputPtr - 1;
+        mTokenInputRow = mCurrInputRow;
+        mTokenInputCol = mInputPtr - mCurrInputRowStart - 1;
+
+        // Closing scope?
+        if (i == INT_RBRACKET) {
+            if (!mParsingContext.isArray()) {
+                reportMismatchedEndMarker(i, ']');
+            }
+            mParsingContext = mParsingContext.getParent();
+            return (mCurrToken = JsonToken.END_ARRAY);
+        }
+        if (i == INT_RCURLY) {
+            if (!mParsingContext.isObject()) {
+                reportMismatchedEndMarker(i, '}');
+            }
+            mParsingContext = mParsingContext.getParent();
+            return (mCurrToken = JsonToken.END_OBJECT);
+        }
+
+        // Nope. Have and/or need a separator?
+        int sep = mParsingContext.handleSeparator(i);
+
+        switch (sep) {
+        case HANDLED_EXPECT_NAME:
+        case HANDLED_EXPECT_VALUE:
+            // Need to skip space, find next char
+            while (true) {
+                if (mInputPtr >= mInputLast) {
+                    if (!loadMore()) {
+                        reportError("Unexpected end-of-input within/between "+mParsingContext.getTypeDesc()+" entries");
+                    }
+                }
+                i = (int) mInputBuffer[mInputPtr++];
+                if (i > INT_SPACE) {
+                    break;
+                }
+                if (i != INT_SPACE) {
+                    if (i == INT_LF) {
+                        skipLF();
+                    } else if (i == INT_CR) {
+                        skipCR();
+                    } else if (i != INT_TAB) {
+                        throwInvalidSpace(i);
+                    }
+                }
+            }
+            // And if we expect a name, must be quote
+            if (sep == HANDLED_EXPECT_NAME) {
+                return handleFieldName(i);
+            }
+            break;
+        case MISSING_COMMA:
+            reportUnexpectedChar(i, "was expecting comma to separate "+mParsingContext.getTypeDesc()+" entries");
+        case MISSING_COLON:
+            reportUnexpectedChar(i, "was expecting colon to separate field name and value");
+        case NOT_EXP_SEPARATOR_NEED_VALUE:
+            break;
+        case NOT_EXP_SEPARATOR_NEED_NAME:
+            return handleFieldName(i);
+        }
+
+        // We now have the first char: what did we get?
+        switch (i) {
+        case INT_QUOTE:
+            return startString();
+        case INT_LBRACKET:
+            mParsingContext = mParsingContext.createChildArrayContext(this);
+            return (mCurrToken = JsonToken.START_ARRAY);
+        case INT_LCURLY:
+            mParsingContext = mParsingContext.createChildObjectContext(this);
+            return (mCurrToken = JsonToken.START_OBJECT);
+        case INT_RBRACKET:
+        case INT_RCURLY:
+            // Error: neither is valid at this point; valid closers have
+            // been handled earlier
+            reportUnexpectedChar(i, "expected a value");
+        case INT_t:
+            return matchToken(JsonToken.VALUE_TRUE);
+        case INT_f:
+            return matchToken(JsonToken.VALUE_FALSE);
+        case INT_n:
+            return matchToken(JsonToken.VALUE_NULL);
+
+        case INT_MINUS:
+            /* Should we have separate handling for plus? Although
+             * it is not allowed per se, it may be erroneously used,
+             * and could be indicated by a more specific error message.
+             */
+        case INT_0:
+        case INT_1:
+        case INT_2:
+        case INT_3:
+        case INT_4:
+        case INT_5:
+        case INT_6:
+        case INT_7:
+        case INT_8:
+        case INT_9:
+            return parseNumberText(i);
+        }
+
+        reportUnexpectedChar(i, "expected a valid value (number, String, array, object, 'true', 'false' or 'null')");
+        return null; // never gets here
+    }
+
+    @Override
+    public void close()
+        throws IOException
+    {
+        super.close();
+        // !!! TBI: merge found symbols:
+        //mSymbols.mergeFromChildrelease();
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Internal methods, secondary parsing
+    ////////////////////////////////////////////////////
+     */
+
+    protected JsonToken handleFieldName(int i)
+        throws IOException, JsonParseException
+    {
+        if (i != INT_QUOTE) {
+            reportUnexpectedChar(i, "was expecting double-quote to start field name");
+        }
+        mFieldInBuffer = false; // by default, assume the name will not end up in the text buffer
+
+        /* First: let's try to see if we have a simple name: one that does
+         * not cross input buffer boundary, and does not contain escape
+         * sequences.
+         */
+        int ptr = mInputPtr;
+        int hash = 0;
+        final int inputLen = mInputLast;
+
+        if (ptr < inputLen) {
+            final int[] codes = CharTypes.getInputCode();
+            final int maxCode = codes.length;
+
+            do {
+                int ch = mInputBuffer[ptr];
+                if (ch < maxCode && codes[ch] != 0) {
+                    if (ch == '"') {
+                        int start = mInputPtr;
+                        mInputPtr = ptr+1; // to skip the quote
+                        // !!! TBI
+                        //String name = mSymbols.findSymbol(mInputBuffer, start, ptr - start, hash);
+                        String name = null;
+                        mParsingContext.setCurrentName(name);
+                        return (mCurrToken = JsonToken.FIELD_NAME);
+                    }
+                    break;
+                }
+                hash = (hash * 31) + ch;
+                ++ptr;
+            } while (ptr < inputLen);
+        }
+
+        int start = mInputPtr;
+        mInputPtr = ptr;
+        return handleFieldName2(start, hash);
+    }
+
+    private JsonToken handleFieldName2(int startPtr, int hash)
+        throws IOException, JsonParseException
+    {
+        // !!! TBI
+        //mTextBuffer.resetWithShared(mInputBuffer, startPtr, (mInputPtr - startPtr));
+
+        /* Output pointers; calls will also ensure that the buffer is
+         * not shared and has room for at least one more char.
+         */
+        char[] outBuf = mTextBuffer.getCurrentSegment();
+        int outPtr = mTextBuffer.getCurrentSegmentSize();
+
+        while (true) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    reportInvalidEOF(": was expecting closing quote for name");
+                }
+            }
+            // !!! TBI
+            //char c = mInputBuffer[mInputPtr++];
+            char c = (char) mInputBuffer[mInputPtr++];
+            int i = (int) c;
+            if (i <= INT_BACKSLASH) {
+                if (i == INT_BACKSLASH) {
+                    /* Although chars outside of BMP are to be escaped as
+                     * an UTF-16 surrogate pair, does that affect decoding?
+                     * For now let's assume it does not.
+                     */
+                    c = decodeEscaped();
+                } else if (i <= INT_QUOTE) {
+                    if (i == INT_QUOTE) {
+                        break;
+                    }
+                    if (i < INT_SPACE) {
+                        throwUnquotedSpace(i, "name");
+                    }
+                }
+            }
+            hash = (hash * 31) + i;
+            // Ok, let's add char to output:
+            outBuf[outPtr++] = c;
+
+            // Need more room?
+            if (outPtr >= outBuf.length) {
+                outBuf = mTextBuffer.finishCurrentSegment();
+                outPtr = 0;
+            }
+        }
+        mTextBuffer.setCurrentLength(outPtr);
+        {
+            mFieldInBuffer = true; // yep, is now stored in text buffer
+            TextBuffer tb = mTextBuffer;
+            char[] buf = tb.getTextBuffer();
+            int start = tb.getTextOffset();
+            int len = tb.size();
+
+            mParsingContext.setCurrentName(mSymbols.findSymbol(buf, start, len, hash));
+        }
+        return (mCurrToken = JsonToken.FIELD_NAME);
+    }
+
+    protected JsonToken startString()
+        throws IOException, JsonParseException
+    {
+        /* First: let's try to see if we have simple String value: one
+         * that does not cross input buffer boundary, and does not
+         * contain escape sequences.
+         */
+        int ptr = mInputPtr;
+        final int inputLen = mInputLast;
+
+        if (ptr < inputLen) {
+            final int[] codes = CharTypes.getInputCode();
+            final int maxCode = codes.length;
+
+            do {
+                int ch = mInputBuffer[ptr];
+                if (ch < maxCode && codes[ch] != 0) {
+                    if (ch == '"') {
+                        // !!! TBI
+                        //mTextBuffer.resetWithShared(mInputBuffer, mInputPtr, (ptr-mInputPtr));
+                        mInputPtr = ptr+1;
+                        return (mCurrToken = JsonToken.VALUE_STRING);
+                    }
+                    break;
+                }
+                ++ptr;
+            } while (ptr < inputLen);
+        }
+
+        /* Nope: either ran out of input, or bumped into an escape
+         * sequence. Either way, let's defer further parsing to ensure
+         * String value is actually needed.
+         */
+        //int start = mInputPtr;
+        // !!! TBI
+        //mTextBuffer.resetWithShared(mInputBuffer, mInputPtr, (ptr-mInputPtr));
+        mInputPtr = ptr;
+        mTokenIncomplete = true;
+        return (mCurrToken = JsonToken.VALUE_STRING);
+    }
+
+    protected void finishString()
+        throws IOException, JsonParseException
+    {
+        /* Output pointers; calls will also ensure that the buffer is
+         * not shared and has room for at least one more char.
+         */
+        char[] outBuf = mTextBuffer.getCurrentSegment();
+        int outPtr = mTextBuffer.getCurrentSegmentSize();
+
+        while (true) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    reportInvalidEOF(": was expecting closing quote for a string value");
+                }
+            }
+            // !!! TBI
+            //char c = mInputBuffer[mInputPtr++];
+            char c = (char) mInputBuffer[mInputPtr++];
+            int i = (int) c;
+            if (i <= INT_BACKSLASH) {
+                if (i == INT_BACKSLASH) {
+                    c = decodeEscaped();
+                } else if (i <= INT_QUOTE) {
+                    if (i == INT_QUOTE) {
+                        break;
+                    }
+                    if (i < INT_SPACE) {
+                        throwUnquotedSpace(i, "string value");
+                    }
+                }
+            }
+            // Need more room?
+            if (outPtr >= outBuf.length) {
+                outBuf = mTextBuffer.finishCurrentSegment();
+                outPtr = 0;
+            }
+            // Ok, let's add char to output:
+            outBuf[outPtr++] = c;
+        }
+        mTextBuffer.setCurrentLength(outPtr);
+    }
+
+    /**
+     * Method called to skim through the rest of an unparsed String value
+     * when it is not needed. This can be done a bit faster since the
+     * contents need not be stored for future access.
+     */
+    protected void skipString()
+        throws IOException, JsonParseException
+    {
+        int inputPtr = mInputPtr;
+        int inputLen = mInputLast;
+        // !!! TBI
+        //char[] inputBuffer = mInputBuffer;
+        char[] inputBuffer = null;
+
+        while (true) {
+            if (inputPtr >= inputLen) {
+                mInputPtr = inputPtr;
+                if (!loadMore()) {
+                    reportInvalidEOF(": was expecting closing quote for a string value");
+                }
+                inputPtr = mInputPtr;
+                inputLen = mInputLast;
+            }
+            // !!! TBI
+            /*
+            char c = inputBuffer[inputPtr++];
+            int i = (int) c;
+            if (i <= INT_BACKSLASH) {
+                if (i == INT_BACKSLASH) {
+                    // Although chars outside of BMP are to be escaped as
+                    // an UTF-16 surrogate pair, does that affect decoding?
+                    // For now let's assume it does not.
+                    mInputPtr = inputPtr;
+                    c = decodeEscaped();
+                    inputPtr = mInputPtr;
+                    inputLen = mInputLast;
+                } else if (i <= INT_QUOTE) {
+                    if (i == INT_QUOTE) {
+                        mInputPtr = inputPtr;
+                        break;
+                    }
+                    if (i < INT_SPACE) {
+                        mInputPtr = inputPtr;
+                        throwUnquotedSpace(i, "string value");
+                    }
+                    }
+                }
+            */
+        }
+    }
+
+    protected JsonToken matchToken(JsonToken token)
+        throws IOException, JsonParseException
+    {
+        // First char is already matched, need to check the rest
+        String matchStr = token.asString();
+        int i = 1;
+
+        for (int len = matchStr.length(); i < len; ++i) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    reportInvalidEOF(" in a value");
+                }
+            }
+            // !!! TBI
+            //char c = mInputBuffer[mInputPtr];
+            char c = (char) mInputBuffer[mInputPtr];
+            if (c != matchStr.charAt(i)) {
+                reportInvalidToken(matchStr.substring(0, i));
+            }
+            ++mInputPtr;
+        }
+        /* Ok, fine; let's not bother checking anything beyond keyword.
+         * If there's something wrong there, it'll cause a parsing
+         * error later on.
+         */
+        return (mCurrToken = token);
+    }
+
+    private void reportInvalidToken(String matchedPart)
+        throws IOException, JsonParseException
+    {
+        StringBuilder sb = new StringBuilder(matchedPart);
+        /* Let's just try to find what appears to be the token, using
+         * regular Java identifier character rules. It's just a heuristic,
+         * nothing fancy here.
+         */
+        while (true) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    break;
+                }
+            }
+            // !!! TBI
+            //char c = mInputBuffer[mInputPtr];
+            char c = (char) mInputBuffer[mInputPtr];
+            if (!Character.isJavaIdentifierPart(c)) {
+                break;
+            }
+            ++mInputPtr;
+            sb.append(c);
+        }
+
+        reportError("Unrecognized token '"+sb.toString()+"': was expecting 'null', 'true' or 'false'");
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Internal methods, other parsing
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method called to process and skip remaining contents of a
+     * partially read token.
+     */
+    protected final void skipPartial()
+        throws IOException, JsonParseException
+    {
+        mTokenIncomplete = false;
+        if (mCurrToken == JsonToken.VALUE_STRING) {
+            skipString();
+        } else {
+            throwInternal();
+        }
+    }
+
+    /**
+     * Method called to finish parsing of a partially parsed token,
+     * in order to access information regarding it.
+     */
+    protected final void finishToken()
+        throws IOException, JsonParseException
+    {
+        mTokenIncomplete = false;
+        if (mCurrToken == JsonToken.VALUE_STRING) {
+            finishString();
+        } else {
+            throwInternal();
+        }
+    }
+
+    protected final char decodeEscaped()
+        throws IOException, JsonParseException
+    {
+        if (mInputPtr >= mInputLast) {
+            if (!loadMore()) {
+                reportInvalidEOF(" in character escape sequence");
+            }
+        }
+        int c = (int) mInputBuffer[mInputPtr++];
+
+        switch (c) {
+            // First, ones that are mapped
+        case INT_b:
+            return '\b';
+        case INT_t:
+            return '\t';
+        case INT_n:
+            return '\n';
+        case INT_f:
+            return '\f';
+        case INT_r:
+            return '\r';
+
+            // And these are to be returned as they are
+        case INT_QUOTE:
+        case INT_SLASH:
+        case INT_BACKSLASH:
+            return (char) c;
+
+        case INT_u: // and finally hex-escaped
+            break;
+
+        default:
+            reportError("Unrecognized character escape \\ followed by "+decodeCharForError(c));
+        }
+
+        // Ok, a hex escape. Need 4 characters
+        int value = 0;
+        for (int i = 0; i < 4; ++i) {
+            if (mInputPtr >= mInputLast) {
+                if (!loadMore()) {
+                    reportInvalidEOF(" in character escape sequence");
+                }
+            }
+            int ch = (int) mInputBuffer[mInputPtr++];
+            int digit = CharTypes.charToHex(ch);
+            if (digit < 0) {
+                reportUnexpectedChar(ch, "expected a hex-digit for character escape sequence");
+            }
+            value = (value << 4) | digit;
+        }
+        return (char) value;
+    }
+
+    protected String decodeCharForError(int firstByte)
+    {
+        // !!! TBI
+        //return "'"+((char) firstByte)+"'";
+        return getCharDesc(firstByte);
+    }
+}
diff --git a/src/java/org/codehaus/jackson/impl/WriterBasedGenerator.java b/src/java/org/codehaus/jackson/impl/WriterBasedGenerator.java
new file mode 100644
index 0000000..15a887a
--- /dev/null
+++ b/src/java/org/codehaus/jackson/impl/WriterBasedGenerator.java
@@ -0,0 +1,741 @@
+package org.codehaus.jackson.impl;
+
+import java.io.*;
+import java.math.BigDecimal;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.io.*;
+import org.codehaus.jackson.util.CharTypes;
+
+public final class WriterBasedGenerator
+    extends JsonGeneratorBase
+{
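+    /**
+     * Writes of raw character content shorter than this are copied into
+     * the output buffer; longer ones are passed straight through to the
+     * underlying {@link Writer}.
+     */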
+    final static int SHORT_WRITE = 32;
+
+    final static char[] HEX_CHARS = "0123456789ABCDEF".toCharArray();
+
+    /*
+    ////////////////////////////////////////////////////
+    // Configuration
+    ////////////////////////////////////////////////////
+     */
+
+    final protected IOContext mIOContext;
+
+    final protected Writer mWriter;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Output buffering
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Intermediate buffer in which contents are buffered before
+     * being written using {@link #mWriter}.
+     */
+    protected char[] mOutputBuffer;
+
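+    /**
+     * Offset of the first character in {@link #mOutputBuffer} that has
+     * not yet been flushed to the underlying writer.
+     */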
+    protected int mOutputHead = 0;
+
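+    /**
+     * Offset within {@link #mOutputBuffer} at which the next character
+     * will be appended.
+     */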
+    protected int mOutputTail = 0;
+
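+    /**
+     * End marker of {@link #mOutputBuffer}: equal to the buffer length,
+     * used to check when the buffer needs to be flushed.
+     */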
+    protected int mOutputEnd;
+
+    /**
+     * 6-char temporary buffer allocated if needed, for constructing
+     * escape sequences
+     */
+    protected char[] mEntityBuffer;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////////////////
+     */
+
+    public WriterBasedGenerator(IOContext ctxt, Writer w)
+    {
+        super();
+        mIOContext = ctxt;
+        mWriter = w;
+        mOutputBuffer = ctxt.allocConcatBuffer();
+        mOutputEnd = mOutputBuffer.length;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Output method implementations, structural
+    ////////////////////////////////////////////////////
+     */
+
+    protected void doWriteStartArray()
+        throws IOException, JsonGenerationException
+    {
+        if (mOutputTail >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputBuffer[mOutputTail++] = '[';
+    }
+
+    protected void doWriteEndArray()
+        throws IOException, JsonGenerationException
+    {
+        if (mOutputTail >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputBuffer[mOutputTail++] = ']';
+    }
+
+    protected void doWriteStartObject()
+        throws IOException, JsonGenerationException
+    {
+        if (mOutputTail >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputBuffer[mOutputTail++] = '{';
+    }
+
+    protected void doWriteEndObject()
+        throws IOException, JsonGenerationException
+    {
+        if (mOutputTail >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputBuffer[mOutputTail++] = '}';
+    }
+
+    public void doWriteFieldName(String name, boolean commaBefore)
+        throws IOException, JsonGenerationException
+    {
+        if (mPrettyPrinter != null) {
+            if (commaBefore) {
+                mPrettyPrinter.writeObjectEntrySeparator(this);
+            } else {
+                mPrettyPrinter.beforeObjectEntries(this);
+            }
+        } else {
+            if (commaBefore) {
+                if (mOutputTail >= mOutputEnd) {
+                    flushBuffer();
+                }
+                mOutputBuffer[mOutputTail++] = ',';
+            }
+        }
+        if (mOutputTail >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputBuffer[mOutputTail++] = '"';
+        doWriteString(name);
+        // And finally, closing quotes
+        if (mOutputTail >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputBuffer[mOutputTail++] = '"';
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Output method implementations, textual
+    ////////////////////////////////////////////////////
+     */
+
+    public void writeString(String text)
+        throws IOException, JsonGenerationException
+    {
+        verifyValueWrite("write text value");
+        if (mOutputTail >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputBuffer[mOutputTail++] = '"';
+        doWriteString(text);
+        // And finally, closing quotes
+        if (mOutputTail >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputBuffer[mOutputTail++] = '"';
+    }
+
+    public void writeString(char[] text, int offset, int len)
+        throws IOException, JsonGenerationException
+    {
+        verifyValueWrite("write text value");
+        if (mOutputTail >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputBuffer[mOutputTail++] = '"';
+        doWriteString(text, offset, len);
+        // And finally, closing quotes
+        if (mOutputTail >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputBuffer[mOutputTail++] = '"';
+    }
+
+    public void writeRaw(String text)
+        throws IOException, JsonGenerationException
+    {
+        // Nothing to check, can just output as is
+        int len = text.length();
+        int room = mOutputEnd - mOutputTail;
+
+        if (room == 0) {
+            flushBuffer();
+            room = mOutputEnd - mOutputTail;
+        }
+        // But would it nicely fit in? If yes, it's easy
+        if (room >= len) {
+            text.getChars(0, len, mOutputBuffer, mOutputTail);
+            mOutputTail += len;
+        } else {
+            writeRawLong(text);
+        }
+    }
+
+    public void writeRaw(String text, int start, int len)
+        throws IOException, JsonGenerationException
+    {
+        // Nothing to check, can just output as is
+        int room = mOutputEnd - mOutputTail;
+
+        if (room < len) {
+            flushBuffer();
+            room = mOutputEnd - mOutputTail;
+        }
+        // But would it nicely fit in? If yes, it's easy
+        if (room >= len) {
+            text.getChars(start, start+len, mOutputBuffer, mOutputTail);
+            mOutputTail += len;
+        } else {
+            writeRawLong(text.substring(start, start+len));
+        }
+    }
+
+    public void writeRaw(char[] text, int offset, int len)
+        throws IOException, JsonGenerationException
+    {
+        // Only worth buffering if it's a short write?
+        if (len < SHORT_WRITE) {
+            int room = mOutputEnd - mOutputTail;
+            if (len > room) {
+                flushBuffer();
+            }
+            System.arraycopy(text, offset, mOutputBuffer, mOutputTail, len);
+            mOutputTail += len;
+            return;
+        }
+        // Otherwise, better just pass through:
+        flushBuffer();
+        mWriter.write(text, offset, len);
+    }
+
+    public void writeRaw(char c)
+        throws IOException, JsonGenerationException
+    {
+        if (mOutputTail >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputBuffer[mOutputTail++] = c;
+    }
+
+    public void writeBinary(byte[] data, int offset, int len)
+        throws IOException, JsonGenerationException
+    {
+        // !!! TBI: base64-based binary output
+
+        throw new RuntimeException("Not yet implemented");
+    }
+
+    private void writeRawLong(String text)
+        throws IOException, JsonGenerationException
+    {
+        int room = mOutputEnd - mOutputTail;
+        // Caller has verified the text does not fit as is, so write it out in buffer-sized chunks
+        text.getChars(0, room, mOutputBuffer, mOutputTail);
+        mOutputTail += room;
+        flushBuffer();
+        int offset = room;
+        int len = text.length() - room;
+
+        while (len > mOutputEnd) {
+            int amount = mOutputEnd;
+            text.getChars(offset, offset+amount, mOutputBuffer, 0);
+            mOutputHead = 0;
+            mOutputTail = amount;
+            flushBuffer();
+            offset += amount;
+            len -= amount;
+        }
+        // And last piece (at most length of buffer)
+        text.getChars(offset, offset+len, mOutputBuffer, 0);
+        mOutputHead = 0;
+        mOutputTail = len;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Output method implementations, primitive
+    ////////////////////////////////////////////////////
+     */
+
+    public void writeNumber(int i)
+        throws IOException, JsonGenerationException
+    {
+        verifyValueWrite("write number");
+        // up to 10 digits, minus sign
+        if ((mOutputTail + 11) >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputTail = NumberOutput.outputInt(i, mOutputBuffer, mOutputTail);
+    }
+
+    public void writeNumber(long l)
+        throws IOException, JsonGenerationException
+    {
+        // up to 20 digits, minus sign
+        verifyValueWrite("write number");
+        if ((mOutputTail + 21) >= mOutputEnd) {
+            flushBuffer();
+        }
+        mOutputTail = NumberOutput.outputLong(l, mOutputBuffer, mOutputTail);
+    }
+
+    public void writeNumber(double d)
+        throws IOException, JsonGenerationException
+    {
+        // What is the max length for doubles? 40 chars?
+        verifyValueWrite("write number");
+
+        // !!! TODO: use a more efficient printing method?
+        writeRaw(String.valueOf(d));
+    }
+
+    public void writeNumber(float f)
+        throws IOException, JsonGenerationException
+    {
+        // What is the max length for floats?
+        verifyValueWrite("write number");
+
+        // !!! TODO: use a more efficient printing method?
+        writeRaw(String.valueOf(f));
+    }
+
+    public void writeNumber(BigDecimal dec)
+        throws IOException, JsonGenerationException
+    {
+        // Don't really know max length for big decimal, no point checking
+        verifyValueWrite("write number");
+
+        // !!! TODO: use a more efficient printing method?
+        writeRaw(dec.toString());
+    }
+
+    public void writeBoolean(boolean state)
+        throws IOException, JsonGenerationException
+    {
+        verifyValueWrite("write boolean value");
+        if ((mOutputTail + 5) >= mOutputEnd) {
+            flushBuffer();
+        }
+        int ptr = mOutputTail;
+        char[] buf = mOutputBuffer;
+        if (state) {
+            buf[ptr] = 't';
+            buf[++ptr] = 'r';
+            buf[++ptr] = 'u';
+            buf[++ptr] = 'e';
+        } else {
+            buf[ptr] = 'f';
+            buf[++ptr] = 'a';
+            buf[++ptr] = 'l';
+            buf[++ptr] = 's';
+            buf[++ptr] = 'e';
+        }
+        mOutputTail = ptr+1;
+    }
+
+    public void writeNull()
+        throws IOException, JsonGenerationException
+    {
+        verifyValueWrite("write null value");
+        if ((mOutputTail + 4) >= mOutputEnd) {
+            flushBuffer();
+        }
+        int ptr = mOutputTail;
+        char[] buf = mOutputBuffer;
+        buf[ptr] = 'n';
+        buf[++ptr] = 'u';
+        buf[++ptr] = 'l';
+        buf[++ptr] = 'l';
+        mOutputTail = ptr+1;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Implementations for other methods
+    ////////////////////////////////////////////////////
+     */
+
+    protected final void verifyValueWrite(String typeMsg)
+        throws IOException, JsonGenerationException
+    {
+        int status = mWriteContext.writeValue();
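+        // writeValue() reports which separator, if any, must precede this value, or signals an error state.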
+        if (status == JsonWriteContext.STATUS_EXPECT_NAME) {
+            reportError("Can not "+typeMsg+", expecting field name");
+        }
+
+        if (mPrettyPrinter == null) {
+            char c;
+            switch (status) {
+            case JsonWriteContext.STATUS_OK_AFTER_COMMA:
+                c = ',';
+                break;
+            case JsonWriteContext.STATUS_OK_AFTER_COLON:
+                c = ':';
+                break;
+            case JsonWriteContext.STATUS_OK_AFTER_SPACE:
+                c = ' ';
+                break;
+            case JsonWriteContext.STATUS_OK_AS_IS:
+            default:
+                return;
+            }
+            if (mOutputTail >= mOutputEnd) {
+                flushBuffer();
+            }
+            mOutputBuffer[mOutputTail++] = c;
+            return;
+        }
+
+        // If we have a pretty printer, it knows what to do:
+        switch (status) {
+        case JsonWriteContext.STATUS_OK_AFTER_COMMA: // array
+            mPrettyPrinter.writeArrayValueSeparator(this);
+            break;
+        case JsonWriteContext.STATUS_OK_AFTER_COLON:
+            mPrettyPrinter.writeObjectFieldValueSeparator(this);
+            break;
+        case JsonWriteContext.STATUS_OK_AFTER_SPACE:
+            mPrettyPrinter.writeRootValueSeparator(this);
+            break;
+        case JsonWriteContext.STATUS_OK_AS_IS:
+            // First entry, but of which context?
+            if (mWriteContext.inArray()) {
+                mPrettyPrinter.beforeArrayValues(this);
+            } else if (mWriteContext.inObject()) {
+                mPrettyPrinter.beforeObjectEntries(this);
+            }
+            break;
+        }
+    }
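+
+    /* Illustration (not part of the original source): with no pretty printer
+     * configured, writing two values into an array makes verifyValueWrite()
+     * emit nothing before the first one (STATUS_OK_AS_IS) and a single ','
+     * before the second (STATUS_OK_AFTER_COMMA); object field values get ':'
+     * and root-level values ' ' the same way, while a configured
+     * PrettyPrinter takes over all separator output.
+     */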
+
+    /*
+    ////////////////////////////////////////////////////
+    // Low-level output handling
+    ////////////////////////////////////////////////////
+     */
+
+    @Override
+    public final void flush()
+        throws IOException
+    {
+        flushBuffer();
+        mWriter.flush();
+    }
+
+    @Override
+    public void close()
+        throws IOException
+    {
+        flushBuffer();
+        /* Note: writer is responsible for its own buffers (acquired
+         * using processing context), and will close them as appropriate.
+         */
+        mWriter.close();
+        // Also, internal buffer(s) can now be released as well
+        releaseBuffers();
+    }
+
+    @Override
+    protected void releaseBuffers()
+    {
+        char[] buf = mOutputBuffer;
+        if (buf != null) {
+            mOutputBuffer = null;
+            mIOContext.releaseConcatBuffer(buf);
+        }
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Internal methods, low-level writing
+    ////////////////////////////////////////////////////
+     */
+
+    private void doWriteString(String text)
+        throws IOException, JsonGenerationException
+    {
+        /* One check first: if String won't fit in the buffer, let's
+         * segment writes. No point in extending buffer to huge sizes
+         * (like if someone wants to include multi-megabyte base64
+         * encoded stuff or such)
+         */
+        int len = text.length();
+        if (len > mOutputEnd) { // Let's reserve space for entity at begin/end
+            doWriteLongString(text);
+            return;
+        }
+
+        // Ok: we know String will fit in buffer ok
+        // But do we need to flush first?
+        if ((mOutputTail + len) > mOutputEnd) {
+            flushBuffer();
+        }
+        text.getChars(0, len, mOutputBuffer, mOutputTail);
+
+        // And then we'll need to verify need for escaping etc:
+        int end = mOutputTail + len;
+        final int[] escCodes = CharTypes.getOutputEscapes();
+        final int escLen = escCodes.length;
+
+        output_loop:
+        while (mOutputTail < end) {
+            // Fast loop for chars not needing escaping
+            escape_loop:
+            while (true) {
+                char c = mOutputBuffer[mOutputTail];
+                if (c < escLen && escCodes[c] != 0) {
+                    break escape_loop;
+                }
+                if (++mOutputTail >= end) {
+                    break output_loop;
+                }
+            }
+
+            // Ok, bumped into something that needs escaping.
+            /* First things first: need to flush the buffer.
+             * Inlined, as we don't want to lose tail pointer
+             */
+            int flushLen = (mOutputTail - mOutputHead);
+            if (flushLen > 0) {
+                mWriter.write(mOutputBuffer, mOutputHead, flushLen);
+            }
+            /* In any case, tail will be the new start, so hopefully
+             * we have room now.
+             */
+            {
+                int escCode = escCodes[mOutputBuffer[mOutputTail]];
+                ++mOutputTail;
+                int needLen = (escCode < 0) ? 6 : 2;
+                // If not, need to call separate method (note: buffer is empty now)
+                if (needLen > mOutputTail) {
+                    mOutputHead = mOutputTail;
+                    writeSingleEscape(escCode);
+                } else {
+                    // But if it fits, can just prepend to buffer
+                    int ptr = mOutputTail - needLen;
+                    mOutputHead = ptr;
+                    appendSingleEscape(escCode, mOutputBuffer, ptr);
+                }
+            }
+        }
+    }
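+
+    /* Worked example for the escape handling above (illustration only):
+     * writing ab"cd with an empty output buffer first copies all five chars
+     * into the buffer via getChars(). The fast loop stops at the '"', the
+     * prefix "ab" is flushed to the Writer, and the two-char escape \" is
+     * then written over the two buffer positions just before the tail
+     * (moving mOutputHead back by needLen), so it goes out with the next
+     * flush; scanning resumes at "cd" with no extra copying.
+     */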
+
+    /**
+     * Method called to write "long strings", strings whose length exceeds
+     * output buffer length.
+     */
+    private void doWriteLongString(String text)
+        throws IOException, JsonGenerationException
+    {
+        // First things first: let's flush the buffer to get some more room
+        flushBuffer();
+
+        // Then we can write 
+        final int textLen = text.length();
+        int offset = 0;
+        do {
+            int max = mOutputEnd;
+            int segmentLen = ((offset + max) > textLen)
+                ? (textLen - offset) : max;
+            text.getChars(offset, offset+segmentLen, mOutputBuffer, 0);
+            doWriteSegment(segmentLen);
+            offset += segmentLen;
+        } while (offset < textLen);
+    }
+
+    /**
+     * Method called to output textual content which has been copied
+     * to the output buffer prior to the call. If any escaping is needed,
+     * it will also be handled by the method.
+     *<p>
+     * Note: when called, textual content to write is within output
+     * buffer, right after buffered content (if any). That's why only
+     * length of that text is passed, as buffer and offset are implied.
+     */
+    private final void doWriteSegment(int end)
+        throws IOException, JsonGenerationException
+    {
+        final int[] escCodes = CharTypes.getOutputEscapes();
+        final int escLen = escCodes.length;
+
+        int ptr = 0;
+
+        output_loop:
+        while (ptr < end) {
+            // Fast loop for chars not needing escaping
+            int start = ptr;
+            while (true) {
+                char c = mOutputBuffer[ptr];
+                if (c < escLen && escCodes[c] != 0) {
+                    break;
+                }
+                if (++ptr >= end) {
+                    break;
+                }
+            }
+
+            // Ok, bumped into something that needs escaping.
+            /* First things first: need to flush the buffer.
+             * Inlined, as we don't want to lose tail pointer
+             */
+            int flushLen = (ptr - start);
+            if (flushLen > 0) {
+                mWriter.write(mOutputBuffer, start, flushLen);
+                if (ptr >= end) {
+                    break output_loop;
+                }
+            }
+            /* In any case, tail will be the new start, so hopefully
+             * we have room now.
+             */
+            {
+                int escCode = escCodes[mOutputBuffer[ptr]];
+                ++ptr;
+                int needLen = (escCode < 0) ? 6 : 2;
+                // If not, need to call separate method (note: buffer is empty now)
+                if (needLen > mOutputTail) {
+                    writeSingleEscape(escCode);
+                } else {
+                    // But if it fits, can just prepend to buffer
+                    ptr -= needLen;
+                    appendSingleEscape(escCode, mOutputBuffer, ptr);
+                }
+            }
+        }
+    }
+
+    /**
+     * This method is called when the string content is already in
+     * a char buffer, and need not be copied for processing.
+     */
+    private void doWriteString(char[] text, int offset, int len)
+        throws IOException, JsonGenerationException
+    {
+        /* Let's just find longest spans of non-escapable
+         * content, and for each see if it makes sense
+         * to copy them, or write through
+         */
+        len += offset; // -> len marks the end from now on
+        final int[] escCodes = CharTypes.getOutputEscapes();
+        final int escLen = escCodes.length;
+        while (offset < len) {
+            int start = offset;
+
+            while (true) {
+                char c = text[offset];
+                if (c < escLen && escCodes[c] != 0) {
+                    break;
+                }
+                if (++offset >= len) {
+                    break;
+                }
+            }
+
+            // Short span? Better just copy it to buffer first:
+            int newAmount = offset - start;
+            if (newAmount < SHORT_WRITE) {
+                // Note: let's reserve room for escaped char (up to 6 chars)
+                if ((mOutputTail + newAmount) > mOutputEnd) {
+                    flushBuffer();
+                }
+                if (newAmount > 0) {
+                    System.arraycopy(text, start, mOutputBuffer, mOutputTail, newAmount);
+                    mOutputTail += newAmount;
+                }
+            } else { // Nope: better just write through
+                flushBuffer();
+                mWriter.write(text, start, newAmount);
+            }
+            // Was this the end?
+            if (offset >= len) { // yup
+                break;
+            }
+            // Nope, need to escape the char.
+            int escCode = escCodes[text[offset]];
+            ++offset;
+            int needLen = (escCode < 0) ? 6 : 2;
+            if ((mOutputTail + needLen) > mOutputEnd) {
+                flushBuffer();
+            }
+            appendSingleEscape(escCode, mOutputBuffer, mOutputTail);
+            mOutputTail += needLen;
+        }
+    }
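+
+    /* Note (not part of the original source): SHORT_WRITE is the threshold
+     * used above -- a clean span shorter than SHORT_WRITE chars is copied
+     * into the output buffer to be flushed later, while a longer span is
+     * handed directly to the underlying Writer after flushing whatever was
+     * already buffered, avoiding the extra copy.
+     */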
+
+    /**
+     * @param escCode Character to use for a two-char escape sequence (\C);
+     *   or a negative value -(ch + 1) to indicate that character ch needs
+     *   a generic (\\uXXXX) escape sequence.
+     */
+    private void writeSingleEscape(int escCode)
+        throws IOException
+    {
+        char[] buf = mEntityBuffer;
+        if (buf == null) {
+            // Lazily allocate (and cache) the shared 6-char escape buffer
+            mEntityBuffer = buf = new char[6];
+            buf[0] = '\\';
+            buf[2] = '0';
+            buf[3] = '0';
+        }
+
+        if (escCode < 0) { // control char, value -(char + 1)
+            int value = -(escCode + 1);
+            buf[1] = 'u';
+            // We know it's a control char, so only the last 2 chars are non-0
+            buf[4] = HEX_CHARS[value >> 4];
+            buf[5] = HEX_CHARS[value & 0xF];
+            mWriter.write(buf, 0, 6);
+        } else {
+            buf[1] = (char) escCode;
+            mWriter.write(buf, 0, 2);
+        }
+    }
+
+    private void appendSingleEscape(int escCode, char[] buf, int ptr)
+    {
+        if (escCode < 0) { // control char, value -(char + 1)
+            int value = -(escCode + 1);
+            buf[ptr] = '\\';
+            buf[++ptr] = 'u';
+            // We know it's a control char, so only the last 2 chars are non-0
+            buf[++ptr] = '0';
+            buf[++ptr] = '0';
+            buf[++ptr] = HEX_CHARS[value >> 4];
+            buf[++ptr] = HEX_CHARS[value & 0xF];
+        } else {
+            buf[ptr] = '\\';
+            buf[ptr+1] = (char) escCode;
+        }
+    }
+
+
+    protected void flushBuffer()
+        throws IOException
+    {
+        int len = mOutputTail - mOutputHead;
+        if (len > 0) {
+            int offset = mOutputHead;
+            mOutputTail = mOutputHead = 0;
+            mWriter.write(mOutputBuffer, offset, len);
+        }
+    }
+}
diff --git a/src/java/org/codehaus/jackson/io/BaseReader.java b/src/java/org/codehaus/jackson/io/BaseReader.java
new file mode 100644
index 0000000..b0fb641
--- /dev/null
+++ b/src/java/org/codehaus/jackson/io/BaseReader.java
@@ -0,0 +1,117 @@
+
+package org.codehaus.jackson.io;
+
+import java.io.*;
+
+
+/**
+ * Simple basic class for optimized readers in this package; implements
+ * "cookie-cutter" methods that are used by all actual implementations.
+ */
+abstract class BaseReader
+    extends Reader
+{
+    /**
+     * JSON actually limits available Unicode range in the high end
+     * to the same as xml (to basically limit UTF-8 max byte sequence
+     * length to 4)
+     */
+    final protected static int LAST_VALID_UNICODE_CHAR = 0x10FFFF;
+
+    final protected static char NULL_CHAR = (char) 0;
+    final protected static byte NULL_BYTE = (byte) 0;
+
+    final protected IOContext mContext;
+
+    protected InputStream mIn;
+
+    protected byte[] mBuffer;
+
+    protected int mPtr;
+    protected int mLength;
+
+    /*
+    ////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////
+    */
+
+    protected BaseReader(IOContext context,
+                         InputStream in, byte[] buf, int ptr, int len)
+    {
+        mContext = context;
+        mIn = in;
+        mBuffer = buf;
+        mPtr = ptr;
+        mLength = len;
+    }
+
+    /*
+    ////////////////////////////////////////
+    // Reader API
+    ////////////////////////////////////////
+    */
+
+    public void close()
+        throws IOException
+    {
+        InputStream in = mIn;
+
+        if (in != null) {
+            mIn = null;
+            freeBuffers();
+            in.close();
+        }
+    }
+
+    char[] mTmpBuf = null;
+
+    /**
+     * Although this method is already implemented by the base class, and it
+     * should never be called by main code, let's still implement it a bit
+     * more efficiently just in case
+     */
+    public int read()
+        throws IOException
+    {
+        if (mTmpBuf == null) {
+            mTmpBuf = new char[1];
+        }
+        if (read(mTmpBuf, 0, 1) < 1) {
+            return -1;
+        }
+        return mTmpBuf[0];
+    }
+
+    /*
+    ////////////////////////////////////////
+    // Internal/package methods:
+    ////////////////////////////////////////
+    */
+
+    /**
+     * This method should be called along with (or instead of) normal
+     * close. After calling this method, no further reads should be tried.
+     * Method will try to recycle read buffers (if any).
+     */
+    public final void freeBuffers()
+    {
+        byte[] buf = mBuffer;
+        if (buf != null) {
+            mBuffer = null;
+            mContext.releaseReadIOBuffer(buf);
+        }
+    }
+
+    protected void reportBounds(char[] cbuf, int start, int len)
+        throws IOException
+    {
+        throw new ArrayIndexOutOfBoundsException("read(buf,"+start+","+len+"), cbuf["+cbuf.length+"]");
+    }
+
+    protected void reportStrangeStream()
+        throws IOException
+    {
+        throw new IOException("Strange I/O stream, returned 0 bytes on read");
+    }
+}
diff --git a/src/java/org/codehaus/jackson/io/ByteSourceBootstrapper.java b/src/java/org/codehaus/jackson/io/ByteSourceBootstrapper.java
new file mode 100644
index 0000000..04a0842
--- /dev/null
+++ b/src/java/org/codehaus/jackson/io/ByteSourceBootstrapper.java
@@ -0,0 +1,315 @@
+package org.codehaus.jackson.io;
+
+import java.io.*;
+
+import org.codehaus.jackson.JsonParseException;
+
+/**
+ * This class is used to determine the encoding of byte stream
+ * that is to contain JSON content. Rules are fairly simple, and
+ * defined in JSON specification (RFC-4627 or newer), except
+ * for BOM handling, which is a property of underlying
+ * streams.
+ */
+public final class ByteSourceBootstrapper
+{
+    /*
+    ////////////////////////////////////////
+    // Configuration
+    ////////////////////////////////////////
+    */
+
+    final IOContext mContext;
+
+    final InputStream mIn;
+
+    /*
+    ///////////////////////////////////////////////////////////////
+    // Input buffering
+    ///////////////////////////////////////////////////////////////
+    */
+
+    final byte[] mInputBuffer;
+
+    private int mInputPtr;
+
+    private int mInputLen;
+
+    /*
+    ///////////////////////////////////////////////////////////////
+    // Input location
+    ///////////////////////////////////////////////////////////////
+    */
+
+    /**
+     * Current number of input units (bytes or chars) that were processed in
+     * previous blocks,
+     * before contents of current input buffer.
+     *<p>
+     * Note: includes possible BOMs, if those were part of the input.
+     */
+    protected int mInputProcessed;
+
+    /*
+    ///////////////////////////////////////////////////////////////
+    // Data gathered
+    ///////////////////////////////////////////////////////////////
+    */
+
+    boolean mBigEndian = true;
+    int mBytesPerChar = 0; // 0 means "dunno yet"
+
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////////////////
+     */
+
+    private ByteSourceBootstrapper(IOContext ctxt, InputStream in)
+    {
+        mContext = ctxt;
+        mIn = in;
+        mInputBuffer = ctxt.allocReadIOBuffer();
+        mInputLen = mInputPtr = 0;
+        mInputProcessed = 0;
+    }
+
+    private ByteSourceBootstrapper(IOContext ctxt, byte[] inputBuffer, int inputStart, int inputLen)
+    {
+        mContext = ctxt;
+        mIn = null;
+        mInputBuffer = inputBuffer;
+        mInputPtr = inputStart;
+        mInputLen = (inputStart + inputLen);
+        // Need to offset this for correct location info
+        mInputProcessed = -inputStart;
+    }
+
+    public static Reader bootstrap(IOContext ctxt, InputStream in)
+        throws IOException, JsonParseException
+    {
+        return new ByteSourceBootstrapper(ctxt, in)._bootstrap();
+    }
+
+    public static Reader bootstrap(IOContext ctxt, byte[] inputBuffer, int inputStart, int inputLen)
+        throws IOException, JsonParseException
+    {
+        return new ByteSourceBootstrapper(ctxt, inputBuffer, inputStart, inputLen)._bootstrap();
+    }
+
+    /*
+    /////////////////////////////////////////////////////////////////
+    // Internal methods, parsing
+    /////////////////////////////////////////////////////////////////
+    */
+
+    /**
+     * @return Actual reader instance, if possibly valid content found;
+     *   exception otherwise
+     */
+    private Reader _bootstrap()
+        throws IOException, JsonParseException
+    {
+        boolean foundEncoding = false;
+
+        // First things first: BOM handling
+        /* Note: we can require 4 bytes to be read, since no
+         * combination of BOM + valid JSON content can have
+         * shorter length (shortest valid JSON content is single
+         * digit char, but BOMs are chosen such that the combination
+         * is always at least 4 bytes long)
+         */
+        if (ensureLoaded(4)) {
+            int quad =  (mInputBuffer[mInputPtr] << 24)
+                | ((mInputBuffer[mInputPtr+1] & 0xFF) << 16)
+                | ((mInputBuffer[mInputPtr+2] & 0xFF) << 8)
+                | (mInputBuffer[mInputPtr+3] & 0xFF);
+            
+            if (handleBOM(quad)) {
+                foundEncoding = true;
+            } else {
+                /* If no BOM, need to auto-detect based on first char;
+                 * this works since it must be 7-bit ascii (wrt. unicode
+                 * compatible encodings, only ones JSON can be transferred
+                 * over)
+                 */
+                // UTF-32?
+                if (checkUTF32(quad)) {
+                    foundEncoding = true;
+                } else if (checkUTF16(quad >>> 16)) {
+                    foundEncoding = true;
+                }
+            }
+        } else if (ensureLoaded(2)) {
+            int i16 = ((mInputBuffer[mInputPtr] & 0xFF) << 8)
+                | (mInputBuffer[mInputPtr+1] & 0xFF);
+            if (checkUTF16(i16)) {
+                foundEncoding = true;
+            }
+        }
+
+        /* Not found yet? As per specs, this means it must be UTF-8. */
+        Reader r;
+        String enc;
+
+        if (!foundEncoding) {
+            enc = "UTF-8";
+            r = new UTF8Reader(mContext, mIn, mInputBuffer, mInputPtr, mInputLen);
+        } else if (mBytesPerChar == 2) {
+            enc = mBigEndian ? "UTF-16BE" : "UTF-16LE";
+
+            // First: do we have a Stream? If not, need to create one:
+            InputStream in = mIn;
+            if (in == null) {
+                in = new ByteArrayInputStream(mInputBuffer, mInputPtr, mInputLen);
+            } else {
+                /* Also, if we have any read but unused input (usually true),
+                 * need to merge that input in:
+                 */
+                if (mInputPtr < mInputLen) {
+                    in = new MergedStream(mContext, in, mInputBuffer, mInputPtr, mInputLen);
+                }
+            }
+            r = new InputStreamReader(in, enc);
+        } else if (mBytesPerChar == 4) {
+            enc = mBigEndian ? "UTF-32BE" : "UTF-32LE";
+            r = new UTF32Reader(mContext, mIn, mInputBuffer, mInputPtr, mInputLen,
+                                mBigEndian);
+        } else {
+            throw new Error("Internal error"); // should never get here
+        }
+        mContext.setEncoding(enc);
+        return r;
+    }
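+
+    /* Illustration (not part of the original source): for BOM-less content
+     * whose first character is '{' (0x7B), the leading bytes identify the
+     * encoding: 00 00 00 7B -> UTF-32BE, 7B 00 00 00 -> UTF-32LE,
+     * 00 7B .. .. -> UTF-16BE, 7B 00 .. .. (third byte non-zero) -> UTF-16LE,
+     * and a quad with no zero bytes in those positions falls through to the
+     * UTF-8 default, as the RFC requires.
+     */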
+
+    /**
+     * @return True if a BOM was successfully found, and encoding
+     *   thereby recognized.
+     */
+    private boolean handleBOM(int quad)
+        throws IOException
+    {
+        /* Handling of (usually) optional BOM (required for
+         * multi-byte formats); first 32-bit charsets:
+         */
+        switch (quad) {
+        case 0x0000FEFF:
+            mBigEndian = true;
+            mInputPtr += 4;
+            mBytesPerChar = 4;
+            return true;
+        case 0xFFFE0000: // UCS-4, LE?
+            mInputPtr += 4;
+            mBytesPerChar = 4;
+            mBigEndian = false;
+            return true;
+        case 0x0000FFFE: // UCS-4, in-order...
+            reportWeirdUCS4("2143"); // throws exception
+        case 0xFEFF0000: // UCS-4, in-order...
+            reportWeirdUCS4("3412"); // throws exception
+        }
+        // Ok, if not, how about 16-bit encoding BOMs?
+        int msw = quad >>> 16;
+        if (msw == 0xFEFF) { // UTF-16, BE
+            mInputPtr += 2;
+            mBytesPerChar = 2;
+            mBigEndian = true;
+            return true;
+        }
+        if (msw == 0xFFFE) { // UTF-16, LE
+            mInputPtr += 2;
+            mBytesPerChar = 2;
+            mBigEndian = false;
+            return true;
+        }
+        // And if not, then UTF-8 BOM?
+        if ((quad >>> 8) == 0xEFBBBF) { // UTF-8
+            mInputPtr += 3;
+            mBytesPerChar = 1;
+            mBigEndian = true; // doesn't really matter
+            return true;
+        }
+        return false;
+    }
+
+    private boolean checkUTF32(int quad)
+        throws IOException
+    {
+        /* Handling of (usually) optional BOM (required for
+         * multi-byte formats); first 32-bit charsets:
+         */
+        if ((quad >> 8) == 0) { // 0x000000?? -> UTF32-BE
+            mBigEndian = true;
+        } else if ((quad & 0x00FFFFFF) == 0) { // 0x??000000 -> UTF32-LE
+            mBigEndian = false;
+        } else if ((quad & ~0x00FF0000) == 0) { // 0x00??0000 -> UTF32-in-order
+            reportWeirdUCS4("3412");
+        } else if ((quad & ~0x0000FF00) == 0) { // 0x0000??00 -> UTF32-in-order
+            reportWeirdUCS4("2143");
+        } else {
+            // Can not be valid UTF-32 encoded JSON...
+            return false;
+        }
+        mInputPtr += 4;
+        mBytesPerChar = 4;
+        return true;
+    }
+
+    private boolean checkUTF16(int i16)
+    {
+        if ((i16 & 0xFF00) == 0) { // UTF-16BE
+            mBigEndian = true;
+        } else if ((i16 & 0x00FF) == 0) { // UTF-16LE
+            mBigEndian = false;
+        } else { // nope, not  UTF-16
+            return false;
+        }
+        mInputPtr += 2;
+        mBytesPerChar = 2;
+        return true;
+    }
+
+    /*
+    /////////////////////////////////////////////////////////////////
+    // Internal methods, problem reporting
+    /////////////////////////////////////////////////////////////////
+    */
+
+    private void reportWeirdUCS4(String type)
+        throws IOException
+    {
+        throw new CharConversionException("Unsupported UCS-4 endianness ("+type+") detected");
+    }
+
+    /*
+    /////////////////////////////////////////////////////////////////
+    // Internal methods, raw input access
+    /////////////////////////////////////////////////////////////////
+    */
+
+    protected boolean ensureLoaded(int minimum)
+        throws IOException
+    {
+        /* Let's assume here buffer has enough room -- this will always
+         * be true for the limited use this method gets
+         */
+        int gotten = (mInputLen - mInputPtr);
+        while (gotten < minimum) {
+            int count;
+
+            if (mIn == null) { // block source
+                count = -1;
+            } else {
+                count = mIn.read(mInputBuffer, mInputLen, mInputBuffer.length - mInputLen);
+            }
+            if (count < 1) {
+                return false;
+            }
+            mInputLen += count;
+            gotten += count;
+        }
+        return true;
+    }
+}
+
diff --git a/src/java/org/codehaus/jackson/io/IOContext.java b/src/java/org/codehaus/jackson/io/IOContext.java
new file mode 100644
index 0000000..3411447
--- /dev/null
+++ b/src/java/org/codehaus/jackson/io/IOContext.java
@@ -0,0 +1,185 @@
+package org.codehaus.jackson.io;
+
+import org.codehaus.jackson.util.BufferRecycler;
+import org.codehaus.jackson.util.TextBuffer;
+
+/**
+ * To limit number of configuration and state objects to pass, all
+ * contextual objects that need to be passed by the factory to
+ * readers and writers are combined under this object. One instance
+ * is created for each reader and writer.
+ */
+public final class IOContext
+{
+    // // // Configuration
+
+    final BufferRecycler mBufferRecycler;
+
+    /**
+     * Reference to the source object, which can be used for displaying
+     * location information
+     */
+    final Object mSourceRef;
+
+    /**
+     * Encoding used by the underlying stream, if known. Will be
+     * a Java compatible encoding name (as opposed to, say, xml
+     * specification defined encodings), usable as-is with JDK
+     * classes.
+     */
+    protected String mEncoding;
+
+    // // // Allocated buffers that need to be kept track of
+
+    /**
+     * Reference to the allocated I/O buffer for low-level input reading,
+     * if any allocated.
+     */
+    protected byte[] mReadIOBuffer = null;
+
+    /**
+     * Reference to the allocated I/O buffer for low-level input writing
+     * if any allocated.
+     */
+    protected byte[] mWriteIOBuffer = null;
+
+    /**
+     * Reference to the buffer allocated for tokenization purposes,
+     * in which character input is read, and from which it can be
+     * further returned.
+     */
+    protected char[] mTokenBuffer = null;
+
+    /**
+     * Reference to the buffer allocated for buffering output content
+     * before it is encoded: generally this means concatenating
+     * output, then encoding when the buffer fills up.
+     */
+    protected char[] mConcatBuffer = null;
+
+    /*
+    //////////////////////////////////////////////////////
+    // Life-cycle
+    //////////////////////////////////////////////////////
+     */
+
+    public IOContext(BufferRecycler br, Object sourceRef)
+    {
+        mBufferRecycler = br;
+        mSourceRef = sourceRef;
+    }
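+
+    /* Usage sketch (illustration only): the factory creates one IOContext per
+     * reader/writer; that component calls one of the alloc*Buffer() methods
+     * to obtain its working buffer and returns it through the matching
+     * release*Buffer() method when it is closed, so the shared BufferRecycler
+     * can hand the buffer out again.
+     */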
+
+    public void setEncoding(String enc)
+    {
+        mEncoding = enc;
+    }
+
+    public TextBuffer constructTextBuffer()
+    {
+        return new TextBuffer(mBufferRecycler);
+    }
+
+    /**
+     *<p>
+     * Note: the method can only be called once during the life cycle of
+     * this context. This is to protect against accidental sharing.
+     */
+    public byte[] allocReadIOBuffer()
+    {
+        if (mReadIOBuffer != null) {
+            throw new IllegalStateException("Trying to call allocReadIOBuffer() second time");
+        }
+        mReadIOBuffer = mBufferRecycler.allocByteBuffer(BufferRecycler.ByteBufferType.READ_IO_BUFFER);
+        return mReadIOBuffer;
+    }
+
+    public byte[] allocWriteIOBuffer()
+    {
+        if (mWriteIOBuffer != null) {
+            throw new IllegalStateException("Trying to call allocWriteIOBuffer() second time");
+        }
+        mWriteIOBuffer = mBufferRecycler.allocByteBuffer(BufferRecycler.ByteBufferType.WRITE_IO_BUFFER);
+        return mWriteIOBuffer;
+    }
+
+    public char[] allocTokenBuffer()
+    {
+        if (mTokenBuffer != null) {
+            throw new IllegalStateException("Trying to call allocTokenBuffer() second time");
+        }
+        mTokenBuffer = mBufferRecycler.allocCharBuffer(BufferRecycler.CharBufferType.TOKEN_BUFFER);
+        return mTokenBuffer;
+    }
+
+    public char[] allocConcatBuffer()
+    {
+        if (mConcatBuffer != null) {
+            throw new IllegalStateException("Trying to call allocConcatBuffer() second time");
+        }
+        mConcatBuffer = mBufferRecycler.allocCharBuffer(BufferRecycler.CharBufferType.CONCAT_BUFFER);
+        return mConcatBuffer;
+    }
+
+    /**
+     * Method to call when the allocated read I/O buffer can be safely
+     * recycled (the other release methods below work the same way).
+     */
+    public void releaseReadIOBuffer(byte[] buf)
+    {
+        if (buf != null) {
+            /* Let's do sanity checks to ensure once-and-only-once release,
+             * as well as avoiding trying to release buffers not owned
+             */
+            if (buf != mReadIOBuffer) {
+                throw new IllegalArgumentException("Trying to release buffer not owned by the context");
+            }
+            mReadIOBuffer = null;
+            mBufferRecycler.releaseByteBuffer(BufferRecycler.ByteBufferType.READ_IO_BUFFER, buf);
+        }
+    }
+
+    public void releaseWriteIOBuffer(byte[] buf)
+    {
+        if (buf != null) {
+            /* Let's do sanity checks to ensure once-and-only-once release,
+             * as well as avoiding trying to release buffers not owned
+             */
+            if (buf != mWriteIOBuffer) {
+                throw new IllegalArgumentException("Trying to release buffer not owned by the context");
+            }
+            mWriteIOBuffer = null;
+            mBufferRecycler.releaseByteBuffer(BufferRecycler.ByteBufferType.WRITE_IO_BUFFER, buf);
+        }
+    }
+
+    public void releaseTokenBuffer(char[] buf)
+    {
+        if (buf != null) {
+            if (buf != mTokenBuffer) {
+                throw new IllegalArgumentException("Trying to release buffer not owned by the context");
+            }
+            mTokenBuffer = null;
+            mBufferRecycler.releaseCharBuffer(BufferRecycler.CharBufferType.TOKEN_BUFFER, buf);
+        }
+    }
+
+    public void releaseConcatBuffer(char[] buf)
+    {
+        if (buf != null) {
+            if (buf != mConcatBuffer) {
+                throw new IllegalArgumentException("Trying to release buffer not owned by the context");
+            }
+            mConcatBuffer = null;
+            mBufferRecycler.releaseCharBuffer(BufferRecycler.CharBufferType.CONCAT_BUFFER, buf);
+        }
+    }
+
+    /*
+    //////////////////////////////////////////////////////
+    // Public API, accessors
+    //////////////////////////////////////////////////////
+     */
+
+    public Object getSourceReference() { return mSourceRef; }
+    public String getEncoding() { return mEncoding; }
+}
diff --git a/src/java/org/codehaus/jackson/io/MergedStream.java b/src/java/org/codehaus/jackson/io/MergedStream.java
new file mode 100644
index 0000000..a2f0d35
--- /dev/null
+++ b/src/java/org/codehaus/jackson/io/MergedStream.java
@@ -0,0 +1,142 @@
+package org.codehaus.jackson.io;
+
+import java.io.*;
+
+
+/**
+ * Simple {@link InputStream} implementation that is used to "unwind" some
+ * data previously read from an input stream; as long as some of
+ * that data remains, it is returned; once it has all been read, we'll
+ * just use data from the underlying original stream.
+ * This is similar to {@link java.io.PushbackInputStream}, but here there's
+ * only one implicit pushback, when instance is constructed.
+ */
+public final class MergedStream
+    extends InputStream
+{
+    final protected IOContext mContext;
+
+    final InputStream mIn;
+
+    byte[] mBuffer;
+
+    int mPtr;
+
+    final int mEnd;
+
+    public MergedStream(IOContext context,
+                        InputStream in, byte[] buf, int start, int end)
+    {
+        mContext = context;
+        mIn = in;
+        mBuffer = buf;
+        mPtr = start;
+        mEnd = end;
+    }
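+
+    /* Illustration (not part of the original source): ByteSourceBootstrapper
+     * uses this class to "push back" bytes it consumed while auto-detecting
+     * the encoding, e.g. new MergedStream(ctxt, in, buffer, ptr, end);
+     * the buffered bytes are served first, after which reads fall through
+     * to the original stream.
+     */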
+
+    public int available()
+        throws IOException
+    {
+        if (mBuffer != null) {
+            return mEnd - mPtr;
+        }
+        return mIn.available();
+    }
+
+    public void close()
+        throws IOException
+    {
+        freeMergedBuffer();
+        mIn.close();
+    }
+
+    public void mark(int readlimit)
+    {
+        if (mBuffer == null) {
+            mIn.mark(readlimit);
+        }
+    }
+    
+    public boolean markSupported()
+    {
+        // Only supports marks past the initial rewindable section...
+        return (mBuffer == null) && mIn.markSupported();
+    }
+    
+    public int read()
+        throws IOException
+    {
+        if (mBuffer != null) {
+            int c = mBuffer[mPtr++] & 0xFF;
+            if (mPtr >= mEnd) {
+                freeMergedBuffer();
+            }
+            return c;
+        }
+        return mIn.read();
+    }
+    
+    public int read(byte[] b)
+        throws IOException
+    {
+        return read(b, 0, b.length);
+    }
+
+    public int read(byte[] b, int off, int len)
+        throws IOException
+    {
+        if (mBuffer != null) {
+            int avail = mEnd - mPtr;
+            if (len > avail) {
+                len = avail;
+            }
+            System.arraycopy(mBuffer, mPtr, b, off, len);
+            mPtr += len;
+            if (mPtr >= mEnd) {
+                freeMergedBuffer();
+            }
+            return len;
+        }
+        return mIn.read(b, off, len);
+    }
+
+    public void reset()
+        throws IOException
+    {
+        if (mBuffer == null) {
+            mIn.reset();
+        }
+    }
+
+    public long skip(long n)
+        throws IOException
+    {
+        long count = 0L;
+
+        if (mBuffer != null) {
+            int amount = mEnd - mPtr;
+
+            if (amount > n) { // all in pushed back segment?
+                mPtr += (int) n;
+                return n;
+            }
+            freeMergedBuffer();
+            count += amount;
+            n -= amount;
+        }
+
+        if (n > 0) {
+            count += mIn.skip(n);
+        }
+        return count;
+    }
+
+    private void freeMergedBuffer()
+    {
+        byte[] buf = mBuffer;
+        if (buf != null) {
+            mBuffer = null;
+            mContext.releaseReadIOBuffer(buf);
+        }
+    }
+}
diff --git a/src/java/org/codehaus/jackson/io/NumberInput.java b/src/java/org/codehaus/jackson/io/NumberInput.java
new file mode 100644
index 0000000..7eab369
--- /dev/null
+++ b/src/java/org/codehaus/jackson/io/NumberInput.java
@@ -0,0 +1,57 @@
+package org.codehaus.jackson.io;
+
+public final class NumberInput
+{
+    /**
+     * Constants needed for parsing longs from basic int parsing methods
+     */
+    final static long L_BILLION = 1000000000;
+
+    /**
+     * Fast method for parsing integers that are known to fit into
+     * regular 32-bit signed int type. This means that length is
+     * between 1 and 9 digits (inclusive)
+     *<p>
+     * Note: public to let unit tests call it
+     */
+    public final static int parseInt(char[] digitChars, int offset, int len)
+    {
+        int num = digitChars[offset] - '0';
+        len += offset;
+        // This looks ugly, but appears the fastest way:
+        if (++offset < len) {
+            num = (num * 10) + (digitChars[offset] - '0');
+            if (++offset < len) {
+                num = (num * 10) + (digitChars[offset] - '0');
+                if (++offset < len) {
+                    num = (num * 10) + (digitChars[offset] - '0');
+                    if (++offset < len) {
+                        num = (num * 10) + (digitChars[offset] - '0');
+                        if (++offset < len) {
+                            num = (num * 10) + (digitChars[offset] - '0');
+                            if (++offset < len) {
+                                num = (num * 10) + (digitChars[offset] - '0');
+                                if (++offset < len) {
+                                    num = (num * 10) + (digitChars[offset] - '0');
+                                    if (++offset < len) {
+                                        num = (num * 10) + (digitChars[offset] - '0');
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        return num;
+    }
+
+    public final static long parseLong(char[] digitChars, int offset, int len)
+    {
+        // Note: caller must ensure length is [10, 18]
+        int len1 = len-9;
+        long val = parseInt(digitChars, offset, len1) * L_BILLION;
+        return val + (long) parseInt(digitChars, offset+len1, 9);
+    }
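+
+    /* Worked example (illustration only): for the 12 digit chars
+     * "123456789012", len1 = 3 and the call evaluates
+     * parseInt("123") * 1000000000L + parseInt("456789012")
+     * = 123000000000 + 456789012 = 123456789012.
+     * Callers must already have checked that the length is in [10, 18]
+     * and that all chars are ASCII digits.
+     */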
+
+}
diff --git a/src/java/org/codehaus/jackson/io/NumberOutput.java b/src/java/org/codehaus/jackson/io/NumberOutput.java
new file mode 100644
index 0000000..b3f4a67
--- /dev/null
+++ b/src/java/org/codehaus/jackson/io/NumberOutput.java
@@ -0,0 +1,255 @@
+package org.codehaus.jackson.io;
+
+public final class NumberOutput
+{
+    private final static char NULL_CHAR = (char) 0;
+
+    private final static int MILLION = 1000000;
+    private final static int BILLION = 1000000000;
+    private final static long TEN_BILLION_L = 10000000000L;
+    private final static long THOUSAND_L = 1000L;
+
+    private final static long MIN_INT_AS_LONG = (long) Integer.MIN_VALUE;
+    private final static long MAX_INT_AS_LONG = (long) Integer.MAX_VALUE;
+
+    final static String SMALLEST_INT = String.valueOf(Integer.MIN_VALUE);
+
+    final static String SMALLEST_LONG = String.valueOf(Long.MIN_VALUE);
+
+    final static char[] LEADING_TRIPLETS = new char[4000];
+    final static char[] FULL_TRIPLETS = new char[4000];
+    static {
+        /* Let's fill it with NULLs for ignorable leading digits,
+         * and digit chars for others
+         */
+        int ix = 0;
+        for (int i1 = 0; i1 < 10; ++i1) {
+            char f1 = (char) ('0' + i1);
+            char l1 = (i1 == 0) ? NULL_CHAR : f1;
+            for (int i2 = 0; i2 < 10; ++i2) {
+                char f2 = (char) ('0' + i2);
+                char l2 = (i1 == 0 && i2 == 0) ? NULL_CHAR : f2;
+                for (int i3 = 0; i3 < 10; ++i3) {
+                    // Last is never to be empty
+                    char f3 = (char) ('0' + i3);
+                    LEADING_TRIPLETS[ix] = l1;
+                    LEADING_TRIPLETS[ix+1] = l2;
+                    LEADING_TRIPLETS[ix+2] = f3;
+                    FULL_TRIPLETS[ix] = f1;
+                    FULL_TRIPLETS[ix+1] = f2;
+                    FULL_TRIPLETS[ix+2] = f3;
+                    ix += 4;
+                }
+            }
+        }
+    }
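+
+    /* Worked example (illustration only): each 3-digit value occupies 4 slots
+     * starting at (triplet << 2); the 4th slot is padding, which is why the
+     * output methods shift by 2 instead of multiplying by 3. For triplet 42,
+     * FULL_TRIPLETS[168..170] holds '0','4','2' while LEADING_TRIPLETS[168..170]
+     * holds NULL_CHAR,'4','2', so leading zeroes are dropped only when the
+     * triplet is the first one of the printed number.
+     */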
+
+    final static String[] sSmallIntStrs = new String[] {
+        "0","1","2","3","4","5","6","7","8","9","10"
+    };
+    final static String[] sSmallIntStrs2 = new String[] {
+        "-1","-2","-3","-4","-5","-6","-7","-8","-9","-10"
+    };
+
+    /**
+     * @return Offset within buffer after outputting int
+     */
+    public static int outputInt(int value, char[] buffer, int offset)
+    {
+        if (value < 0) {
+            if (value == Integer.MIN_VALUE) {
+                // Special case: no matching positive value within range
+                int len = SMALLEST_INT.length();
+                SMALLEST_INT.getChars(0, len, buffer, offset);
+                return (offset + len);
+            }
+            buffer[offset++] = '-';
+            value = -value;
+        }
+
+        if (value < MILLION) { // at most 2 triplets...
+            if (value < 1000) {
+                if (value < 10) {
+                    buffer[offset++] = (char) ('0' + value);
+                } else {
+                    offset = outputLeadingTriplet(value, buffer, offset);
+                }
+            } else {
+                int thousands = value / 1000;
+                value -= (thousands * 1000); // == value % 1000
+                offset = outputLeadingTriplet(thousands, buffer, offset);
+                offset = outputFullTriplet(value, buffer, offset);
+            }
+            return offset;
+        }
+
+        // ok, all 3 triplets included
+        /* Let's first handle possible billions separately before
+         * handling 3 triplets. This is possible since we know we
+         * can have at most '2' as billion count.
+         */
+        boolean hasBillions = (value >= BILLION);
+        if (hasBillions) {
+            value -= BILLION;
+            if (value >= BILLION) {
+                value -= BILLION;
+                buffer[offset++] = '2';
+            } else {
+                buffer[offset++] = '1';
+            }
+        }
+        int newValue = value / 1000;
+        int ones = (value - (newValue * 1000)); // == value % 1000
+        value = newValue;
+        newValue /= 1000;
+        int thousands = (value - (newValue * 1000));
+        
+        // value now has millions, which have 1, 2 or 3 digits
+        if (hasBillions) {
+            offset = outputFullTriplet(newValue, buffer, offset);
+        } else {
+            offset = outputLeadingTriplet(newValue, buffer, offset);
+        }
+        offset = outputFullTriplet(thousands, buffer, offset);
+        offset = outputFullTriplet(ones, buffer, offset);
+        return offset;
+    }
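+
+    /* Worked example (illustration only): outputInt(1234567890, buf, 0) writes
+     * '1' for the billions and then the full triplets 234, 567 and 890,
+     * yielding "1234567890"; outputInt(4712, buf, 0) takes the early path,
+     * writing the leading triplet "4" followed by the full triplet "712".
+     */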
+
+    /**
+     * @return Offset within buffer after outputting int
+     */
+    public static int outputLong(long value, char[] buffer, int offset)
+    {
+        // First: does it actually fit in an int?
+        if (value < 0L) {
+            if (value >= MIN_INT_AS_LONG) {
+                return outputInt((int) value, buffer, offset);
+            }
+            if (value == Long.MIN_VALUE) {
+                // Special case: no matching positive value within range
+                int len = SMALLEST_LONG.length();
+                SMALLEST_LONG.getChars(0, len, buffer, offset);
+                return (offset + len);
+            }
+            buffer[offset++] = '-';
+            value = -value;
+        } else {
+            if (value <= MAX_INT_AS_LONG) {
+                return outputInt((int) value, buffer, offset);
+            }
+        }
+
+        /* Ok: real long print. Need to first figure out length
+         * in characters, and then print in from end to beginning
+         */
+        int origOffset = offset;
+        offset += calcLongStrLength(value);
+        int ptr = offset;
+
+        // First, with long arithmetics:
+        while (value > MAX_INT_AS_LONG) { // full triplet
+            ptr -= 3;
+            long newValue = value / THOUSAND_L;
+            int triplet = (int) (value - newValue * THOUSAND_L);
+            outputFullTriplet(triplet, buffer, ptr);
+            value = newValue;
+        }
+        // Then with int arithmetics:
+        int ivalue = (int) value;
+        while (ivalue >= 1000) { // still full triplet
+            ptr -= 3;
+            int newValue = ivalue / 1000;
+            int triplet = ivalue - (newValue * 1000);
+            outputFullTriplet(triplet, buffer, ptr);
+            ivalue = newValue;
+        }
+        // And finally, if anything remains, partial triplet
+        outputLeadingTriplet(ivalue, buffer, origOffset);
+
+        return offset;
+    }
+
+    public static String toString(int value)
+    {
+        // Lookup table for small values
+        if (value < sSmallIntStrs.length) {
+            if (value >= 0) {
+                return sSmallIntStrs[value];
+            }
+            int v2 = -value - 1;
+            if (v2 < sSmallIntStrs2.length) {
+                return sSmallIntStrs2[v2];
+            }
+        }
+        // !!! TODO: further optimize?
+        return Integer.toString(value);
+    }
+
+    public static String toString(long value)
+    {
+        if (value <= Integer.MAX_VALUE &&
+            value >= Integer.MIN_VALUE) {
+            return toString((int) value);
+        }
+        // !!! TODO: further optimize?
+        return Long.toString(value);
+    }
+
+    public static String toString(double value)
+    {
+        // !!! TODO: optimize?
+        return Double.toString(value);
+    }
+
+    /*
+    ////////////////////////////////////////
+    // Internal methods
+    ////////////////////////////////////////
+     */
+
+    private static int outputLeadingTriplet(int triplet, char[] buffer, int offset)
+    {
+        int digitOffset = (triplet << 2);
+        char c = LEADING_TRIPLETS[digitOffset++];
+        if (c != NULL_CHAR) {
+            buffer[offset++] = c;
+        }
+        c = LEADING_TRIPLETS[digitOffset++];
+        if (c != NULL_CHAR) {
+            buffer[offset++] = c;
+        }
+        // Last is required to be non-empty
+        buffer[offset++] = LEADING_TRIPLETS[digitOffset];
+        return offset;
+    }
+
+    private static int outputFullTriplet(int triplet, char[] buffer, int offset)
+    {
+        int digitOffset = (triplet << 2);
+        buffer[offset++] = FULL_TRIPLETS[digitOffset++];
+        buffer[offset++] = FULL_TRIPLETS[digitOffset++];
+        buffer[offset++] = FULL_TRIPLETS[digitOffset];
+        return offset;
+    }
+
+    /**
+     *<p>
+     * Pre-conditions: posValue is positive, and larger than
+     * Integer.MAX_VALUE (about 2 billion).
+     */
+    private static int calcLongStrLength(long posValue)
+    {
+        int len = 10;
+        long comp = TEN_BILLION_L;
+
+        // 19 is longest, need to worry about overflow
+        while (posValue >= comp) {
+            if (len == 19) {
+                break;
+            }
+            ++len;
+            comp = (comp << 3) + (comp << 1); // 10x
+        }
+        return len;
+    }
+}
diff --git a/src/java/org/codehaus/jackson/io/UTF32Reader.java b/src/java/org/codehaus/jackson/io/UTF32Reader.java
new file mode 100644
index 0000000..3df452c
--- /dev/null
+++ b/src/java/org/codehaus/jackson/io/UTF32Reader.java
@@ -0,0 +1,213 @@
+package org.codehaus.jackson.io;
+
+import java.io.*;
+
+
+/**
+ * Since JDK does not come with UTF-32/UCS-4, let's implement a simple
+ * decoder to use.
+ */
+public final class UTF32Reader
+    extends BaseReader
+{
+    final boolean mBigEndian;
+
+    /**
+     * Although input is fine with full Unicode set, Java still uses
+     * 16-bit chars, so we may have to split high-order chars into
+     * surrogate pairs.
+     */
+    char mSurrogate = NULL_CHAR;
+
+    /**
+     * Total read character count; used for error reporting purposes
+     */
+    int mCharCount = 0;
+
+    /**
+     * Total read byte count; used for error reporting purposes
+     */
+    int mByteCount = 0;
+
+    /*
+    ////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////
+    */
+
+    public UTF32Reader(IOContext ctxt,
+                       InputStream in, byte[] buf, int ptr, int len,
+                       boolean isBigEndian)
+    {
+        super(ctxt, in, buf, ptr, len);
+        mBigEndian = isBigEndian;
+    }
+
+    /*
+    ////////////////////////////////////////
+    // Public API
+    ////////////////////////////////////////
+    */
+
+    public int read(char[] cbuf, int start, int len)
+        throws IOException
+    {
+        // Already EOF?
+        if (mBuffer == null) {
+            return -1;
+        }
+        if (len < 1) {
+            return len;
+        }
+        // Let's then ensure there's enough room...
+        if (start < 0 || (start+len) > cbuf.length) {
+            reportBounds(cbuf, start, len);
+        }
+
+        len += start;
+        int outPtr = start;
+
+        // Ok, first; do we have a surrogate from last round?
+        if (mSurrogate != NULL_CHAR) {
+            cbuf[outPtr++] = mSurrogate;
+            mSurrogate = NULL_CHAR;
+            // No need to load more, already got one char
+        } else {
+            /* Note: we'll try to avoid blocking as much as possible. As a
+             * result, we only need to get 4 bytes for a full char.
+             */
+            int left = (mLength - mPtr);
+            if (left < 4) {
+                if (!loadMore(left)) { // (legal) EOF?
+                    return -1;
+                }
+            }
+        }
+
+        main_loop:
+        while (outPtr < len) {
+            int ptr = mPtr;
+            int ch;
+
+            if (mBigEndian) {
+                ch = (mBuffer[ptr] << 24) | ((mBuffer[ptr+1] & 0xFF) << 16)
+                    | ((mBuffer[ptr+2] & 0xFF) << 8) | (mBuffer[ptr+3] & 0xFF);
+            } else {
+                ch = (mBuffer[ptr] & 0xFF) | ((mBuffer[ptr+1] & 0xFF) << 8)
+                    | ((mBuffer[ptr+2] & 0xFF) << 16) | (mBuffer[ptr+3] << 24);
+            }
+            mPtr += 4;
+
+            // Does it need to be split to surrogates?
+            // (also, we can and need to verify illegal chars)
+            if (ch > 0xFFFF) { // need to split into surrogates?
+                if (ch > LAST_VALID_UNICODE_CHAR) {
+                    reportInvalid(ch, outPtr-start,
+                                  "(above "+Integer.toHexString(LAST_VALID_UNICODE_CHAR)+") ");
+                }
+                ch -= 0x10000; // to normalize it starting with 0x0
+                cbuf[outPtr++] = (char) (0xD800 + (ch >> 10));
+                // hmmh. can this ever be 0? (not legal, at least?)
+                ch = (0xDC00 | (ch & 0x03FF));
+                // Room for second part?
+                if (outPtr >= len) { // nope
+                    mSurrogate = (char) ch;
+                    break main_loop;
+                }
+            }
+            cbuf[outPtr++] = (char) ch;
+            if (mPtr >= mLength) {
+                break main_loop;
+            }
+        }
+
+        len = outPtr - start;
+        mCharCount += len;
+        return len;
+    }
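+
+    /* Illustration (not part of the original source): a supplementary code
+     * point such as U+10437 is decoded from its four bytes and split by the
+     * loop above into the surrogate pair 0xD801, 0xDC37; if only one output
+     * slot remains, the low surrogate is parked in mSurrogate and emitted on
+     * the next read() call.
+     */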
+
+    /*
+    ////////////////////////////////////////
+    // Internal methods
+    ////////////////////////////////////////
+    */
+
+    private void reportUnexpectedEOF(int gotBytes, int needed)
+        throws IOException
+    {
+        int bytePos = mByteCount + gotBytes;
+        int charPos = mCharCount;
+
+        throw new CharConversionException("Unexpected EOF in the middle of a 4-byte UTF-32 char: got "
+                                          +gotBytes+", needed "+needed
+                                          +", at char #"+charPos+", byte #"+bytePos+")");
+    }
+
+    private void reportInvalid(int value, int offset, String msg)
+        throws IOException
+    {
+        int bytePos = mByteCount + mPtr - 1;
+        int charPos = mCharCount + offset;
+
+        throw new CharConversionException("Invalid UTF-32 character 0x"
+                                          +Integer.toHexString(value)+" "
+                                          +msg+"at char #"+charPos+", byte #"+bytePos);
+    }
+
+    /**
+     * @param available Number of "unused" bytes in the input buffer
+     *
+     * @return True, if enough bytes were read to allow decoding of at least
+     *   one full character; false if EOF was encountered instead.
+     */
+    private boolean loadMore(int available)
+        throws IOException
+    {
+        mByteCount += (mLength - available);
+
+        // Bytes that need to be moved to the beginning of buffer?
+        if (available > 0) {
+            if (mPtr > 0) {
+                for (int i = 0; i < available; ++i) {
+                    mBuffer[i] = mBuffer[mPtr+i];
+                }
+                mPtr = 0;
+            }
+            mLength = available;
+        } else {
+            /* Ok; here we can actually reasonably expect an EOF,
+             * so let's do a separate read right away:
+             */
+            mPtr = 0;
+            int count = mIn.read(mBuffer);
+            if (count < 1) {
+                mLength = 0;
+                if (count < 0) { // -1
+                    freeBuffers(); // to help GC?
+                    return false;
+                }
+                // 0 count is no good; let's err out
+                reportStrangeStream();
+            }
+            mLength = count;
+        }
+
+        /* Need at least 4 bytes; if we don't get that many, it's an
+         * error.
+         */
+        while (mLength < 4) {
+            int count = mIn.read(mBuffer, mLength, mBuffer.length - mLength);
+            if (count < 1) {
+                if (count < 0) { // -1, EOF... no good!
+                    freeBuffers(); // to help GC?
+                    reportUnexpectedEOF(mLength, 4);
+                }
+                // 0 count is no good; let's err out
+                reportStrangeStream();
+            }
+            mLength += count;
+        }
+        return true;
+    }
+}
+
diff --git a/src/java/org/codehaus/jackson/io/UTF8Reader.java b/src/java/org/codehaus/jackson/io/UTF8Reader.java
new file mode 100644
index 0000000..1a02a1b
--- /dev/null
+++ b/src/java/org/codehaus/jackson/io/UTF8Reader.java
@@ -0,0 +1,363 @@
+package org.codehaus.jackson.io;
+
+import java.io.*;
+
+
+/**
+ * Optimized Reader that reads UTF-8 encoded content from an input stream.
+ * In addition to doing (hopefully) optimal conversion, it can also take
+ * array of "pre-read" (leftover) bytes; this is necessary when preliminary
+ * stream/reader is trying to figure out character encoding.
+ */
+public final class UTF8Reader
+    extends BaseReader
+{
+    char mSurrogate = NULL_CHAR;
+
+    /**
+     * Total read character count; used for error reporting purposes
+     */
+    int mCharCount = 0;
+
+    /**
+     * Total read byte count; used for error reporting purposes
+     */
+    int mByteCount = 0;
+
+    /*
+    ////////////////////////////////////////
+    // Life-cycle
+    ////////////////////////////////////////
+    */
+
+    public UTF8Reader(IOContext ctxt,
+                      InputStream in, byte[] buf, int ptr, int len)
+    {
+        super(ctxt, in, buf, ptr, len);
+    }
+
+    /*
+    ////////////////////////////////////////
+    // Public API
+    ////////////////////////////////////////
+    */
+
+    public int read(char[] cbuf, int start, int len)
+        throws IOException
+    {
+        // Already EOF?
+        if (mBuffer == null) {
+            return -1;
+        }
+        if (len < 1) {
+            return len;
+        }
+        // Let's then ensure there's enough room...
+        if (start < 0 || (start+len) > cbuf.length) {
+            reportBounds(cbuf, start, len);
+        }
+
+        len += start;
+        int outPtr = start;
+
+        // Ok, first; do we have a surrogate from last round?
+        if (mSurrogate != NULL_CHAR) {
+            cbuf[outPtr++] = mSurrogate;
+            mSurrogate = NULL_CHAR;
+            // No need to load more, already got one char
+        } else {
+            /* To prevent unnecessary blocking (esp. with network streams),
+             * we'll only require decoding of a single char
+             */
+            int left = (mLength - mPtr);
+
+            /* So; only need to load more if we can't provide at least
+             * one more character. We need not do thorough check here,
+             * but let's check the common cases here: either completely
+             * empty buffer (left == 0), or one with less than max. byte
+             * count for a single char, and starting of a multi-byte
+             * encoding (this leaves possibility of a 2/3-byte char
+             * that is still fully accessible... but that can be checked
+             * by the load method)
+             */
+            if (left < 4) {
+                // Need to load more?
+                if (left < 1 || mBuffer[mPtr] < 0) {
+                    if (!loadMore(left)) { // (legal) EOF?
+                        return -1;
+                    }
+                }
+            }
+        }
+
+        /* This may look silly, but using a local var is indeed faster
+         * (if and when HotSpot properly gets things running) than
+         * member variable...
+         */
+        byte[] buf = mBuffer;
+        int inPtr = mPtr;
+        int inBufLen = mLength;
+
+        main_loop:
+        while (outPtr < len) {
+            // At this point we have at least one byte available
+            int c = (int) buf[inPtr++];
+
+            /* Let's first do the quickie loop for common case; 7-bit
+             * ascii:
+             */
+            if (c >= 0) { // ascii? can probably loop, then
+                cbuf[outPtr++] = (char) c; // ok since MSB is never on
+
+                /* Ok, how many such chars could we safely process
+                 * without overruns? (will combine 2 in-loop comparisons
+                 * into just one)
+                 */
+                int outMax = (len - outPtr); // max output
+                int inMax = (inBufLen - inPtr); // max input
+                int inEnd = inPtr + ((inMax < outMax) ? inMax : outMax);
+
+                ascii_loop:
+                while (true) {
+                    if (inPtr >= inEnd) {
+                        break main_loop;
+                    }
+                    c = (int) buf[inPtr++];
+                    if (c < 0) { // multi-byte
+                        break ascii_loop;
+                    }
+                    cbuf[outPtr++] = (char) c;
+                }
+            }
+
+            int needed;
+
+            // Ok; if we end here, we got multi-byte combination
+            if ((c & 0xE0) == 0xC0) { // 2 bytes (0x0080 - 0x07FF)
+                c = (c & 0x1F);
+                needed = 1;
+            } else if ((c & 0xF0) == 0xE0) { // 3 bytes (0x0800 - 0xFFFF)
+                c = (c & 0x0F);
+                needed = 2;
+            } else if ((c & 0xF8) == 0xF0) {
+                // 4 bytes; outside the BMP, so will need a surrogate pair on output
+                c = (c & 0x0F);
+                needed = 3;
+            } else {
+                reportInvalidInitial(c & 0xFF, outPtr-start);
+                // never gets here...
+                needed = 1;
+            }
+            /* Do we have enough bytes? If not, let's just push back the
+             * byte and leave, since we have already gotten at least one
+             * char decoded. This way we will only block (with read from
+             * input stream) when absolutely necessary.
+             */
+            if ((inBufLen - inPtr) < needed) {
+                --inPtr;
+                break main_loop;
+            }
+
+            int d = (int) buf[inPtr++];
+            if ((d & 0xC0) != 0x080) {
+                reportInvalidOther(d & 0xFF, outPtr-start);
+            }
+            c = (c << 6) | (d & 0x3F);
+
+            if (needed > 1) { // needed == 1 means 2 bytes total
+                d = buf[inPtr++]; // 3rd byte
+                if ((d & 0xC0) != 0x080) {
+                    reportInvalidOther(d & 0xFF, outPtr-start);
+                }
+                c = (c << 6) | (d & 0x3F);
+                if (needed > 2) { // 4 bytes? (need surrogates)
+                    d = buf[inPtr++];
+                    if ((d & 0xC0) != 0x080) {
+                        reportInvalidOther(d & 0xFF, outPtr-start);
+                    }
+                    c = (c << 6) | (d & 0x3F);
+                    if (c > LAST_VALID_UNICODE_CHAR) {
+                        reportInvalid(c, outPtr-start,
+                                      " (above 0x"+Integer.toHexString(LAST_VALID_UNICODE_CHAR)+")");
+                    }
+                    /* Ugh. Need to mess with surrogates. Ok; let's inline them
+                     * there, then, if there's room: if only room for one,
+                     * need to save the surrogate for the rainy day...
+                     */
+                    c -= 0x10000; // to normalize it starting with 0x0
+                    cbuf[outPtr++] = (char) (0xD800 + (c >> 10));
+                    // hmmh. can this ever be 0? (not legal, at least?)
+                    c = (0xDC00 | (c & 0x03FF));
+
+                    // Room for second part?
+                    if (outPtr >= len) { // nope
+                        mSurrogate = (char) c;
+                        break main_loop;
+                    }
+                    // sure, let's fall back to normal processing:
+                }
+
+                /* 08-Jun-2007, TSa: Not sure if it's really legal
+                 *   to get surrogate chars here: JSON specs do not
+                 *   prevent them, which is different from xml. So
+                 *   for now let's not worry about them. If checks
+                 *   are needed, can uncomment following:
+                 */
+
+                /*
+                else {
+                    // Otherwise, need to check that 3-byte chars are
+                    // legal ones (should not expand to surrogates)
+                    if (c >= 0xD800) {
+                        // But first, let's check max chars:
+                        if (c < 0xE000) {
+                            reportInvalid(c, outPtr-start, "(a surrogate character) ");
+                        }
+                    }
+                }
+                */
+            }
+            cbuf[outPtr++] = (char) c;
+            if (inPtr >= inBufLen) {
+                break main_loop;
+            }
+        }
+
+        mPtr = inPtr;
+        len = outPtr - start;
+        mCharCount += len;
+        return len;
+    }
+
+    /*
+    ////////////////////////////////////////
+    // Internal methods
+    ////////////////////////////////////////
+    */
+
+    private void reportInvalidInitial(int mask, int offset)
+        throws IOException
+    {
+        // input (byte) ptr has been advanced by one, by now:
+        int bytePos = mByteCount + mPtr - 1;
+        int charPos = mCharCount + offset + 1;
+
+        throw new CharConversionException("Invalid UTF-8 start byte 0x"
+                                          +Integer.toHexString(mask)
+                                          +" (at char #"+charPos+", byte #"+bytePos+")");
+    }
+
+    private void reportInvalidOther(int mask, int offset)
+        throws IOException
+    {
+        int bytePos = mByteCount + mPtr - 1;
+        int charPos = mCharCount + offset;
+
+        throw new CharConversionException("Invalid UTF-8 middle byte 0x"
+                                          +Integer.toHexString(mask)
+                                          +" (at char #"+charPos+", byte #"+bytePos+")");
+    }
+
+    private void reportUnexpectedEOF(int gotBytes, int needed)
+        throws IOException
+    {
+        int bytePos = mByteCount + gotBytes;
+        int charPos = mCharCount;
+
+        throw new CharConversionException("Unexpected EOF in the middle of a multi-byte char: got "
+                                          +gotBytes+", needed "+needed
+                                          +" (at char #"+charPos+", byte #"+bytePos+")");
+    }
+
+    private void reportInvalid(int value, int offset, String msg)
+        throws IOException
+    { 
+        int bytePos = mByteCount + mPtr - 1;
+        int charPos = mCharCount + offset;
+
+        throw new CharConversionException("Invalid UTF-8 character 0x"
+                                          +Integer.toHexString(value)+msg
+                                          +" (at char #"+charPos+", byte #"+bytePos+")");
+    }
+
+    /**
+     * @param available Number of "unused" bytes in the input buffer
+     *
+     * @return True, if enough bytes were read to allow decoding of at least
+     *   one full character; false if EOF was encountered instead.
+     */
+    private boolean loadMore(int available)
+        throws IOException
+    {
+        mByteCount += (mLength - available);
+
+        // Bytes that need to be moved to the beginning of buffer?
+        if (available > 0) {
+            if (mPtr > 0) {
+                for (int i = 0; i < available; ++i) {
+                    mBuffer[i] = mBuffer[mPtr+i];
+                }
+                mPtr = 0;
+            }
+            mLength = available;
+        } else {
+            /* Ok; here we can actually reasonably expect an EOF,
+             * so let's do a separate read right away:
+             */
+            mPtr = 0;
+            int count = mIn.read(mBuffer);
+            if (count < 1) {
+                mLength = 0;
+                if (count < 0) { // -1
+                    freeBuffers(); // to help GC?
+                    return false;
+                }
+                // 0 count is no good; let's err out
+                reportStrangeStream();
+            }
+            mLength = count;
+        }
+
+        /* We now have at least one byte... and that allows us to
+         * calculate exactly how many bytes we need!
+         */
+        int c = (int) mBuffer[0];
+        if (c >= 0) { // single byte (ascii) char... cool, can return
+            return true;
+        }
+
+        // Ok, a multi-byte char, let's check how many bytes we'll need:
+        int needed;
+        if ((c & 0xE0) == 0xC0) { // 2 bytes (0x0080 - 0x07FF)
+            needed = 2;
+        } else if ((c & 0xF0) == 0xE0) { // 3 bytes (0x0800 - 0xFFFF)
+            needed = 3;
+        } else if ((c & 0xF8) == 0xF0) {
+            // 4 bytes; outside the BMP, so will expand to a surrogate pair when decoded
+            needed = 4;
+        } else {
+            reportInvalidInitial(c & 0xFF, 0);
+            // never gets here... but compiler whines without this:
+            needed = 1;
+        }
+
+        /* And then we'll just need to load up to that many bytes;
+         * if an EOF is hit, that'll be an error. But we need not do
+         * actual decoding here, just load enough bytes.
+         */
+        while (mLength < needed) {
+            int count = mIn.read(mBuffer, mLength, mBuffer.length - mLength);
+            if (count < 1) {
+                if (count < 0) { // -1, EOF... no good!
+                    freeBuffers();
+                    reportUnexpectedEOF(mLength, needed);
+                }
+                // 0 count is no good; let's err out
+                reportStrangeStream();
+            }
+            mLength += count;
+        }
+        return true;
+    }
+}
+
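+/* A rough usage sketch of this reader, written as a helper method so that the
+ * pieces that come from elsewhere (the IOContext, the stream, and any bytes
+ * already consumed while detecting the encoding) are plain parameters. This is
+ * only an illustration of the constructor/read contract, not part of the API;
+ * the helper name and buffer sizes are arbitrary.
+ *
+ *   static String drainAsString(IOContext ctxt, InputStream in,
+ *                               byte[] preReadBuffer, int preReadLength)
+ *       throws IOException
+ *   {
+ *       Reader r = new UTF8Reader(ctxt, in, preReadBuffer, 0, preReadLength);
+ *       StringBuilder sb = new StringBuilder();
+ *       char[] cbuf = new char[4000];
+ *       int count;
+ *       while ((count = r.read(cbuf, 0, cbuf.length)) > 0) {
+ *           sb.append(cbuf, 0, count); // surrogate halves may arrive on separate reads
+ *       }
+ *       r.close();
+ *       return sb.toString();
+ *   }
+ */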
diff --git a/src/java/org/codehaus/jackson/io/UTF8Writer.java b/src/java/org/codehaus/jackson/io/UTF8Writer.java
new file mode 100644
index 0000000..832ece7
--- /dev/null
+++ b/src/java/org/codehaus/jackson/io/UTF8Writer.java
@@ -0,0 +1,380 @@
+package org.codehaus.jackson.io;
+
+import java.io.*;
+
+
+public final class UTF8Writer
+    extends Writer
+{
+    final static int SURR1_FIRST = 0xD800;
+    final static int SURR1_LAST = 0xDBFF;
+    final static int SURR2_FIRST = 0xDC00;
+    final static int SURR2_LAST = 0xDFFF;
+
+    final protected IOContext mContext;
+
+    OutputStream mOut;
+
+    byte[] mOutBuffer;
+
+    final int mOutBufferLast;
+
+    int mOutPtr;
+
+    /**
+     * When outputting characters beyond the BMP, surrogate pairs need to be
+     * coalesced into a single code point. To do this, both halves of the
+     * pair must be known first; and since a pair may be split across write
+     * calls, we need temporary storage for the first half.
+     */
+    int mSurrogate = 0;
+
+    public UTF8Writer(IOContext ctxt, OutputStream out)
+    {
+        mContext = ctxt;
+        mOut = out;
+
+        mOutBuffer = ctxt.allocWriteIOBuffer();
+        /* Max. expansion for a single char is 3 bytes; a surrogate pair
+         * (two chars) is combined into a single 4-byte sequence, so 4
+         * bytes of slack is enough in either case
+         */
+        mOutBufferLast = mOutBuffer.length - 4;
+        mOutPtr = 0;
+    }
+
+    public Writer append(char c)
+        throws IOException
+    {
+        write(c);
+        return this;
+    }
+
+    public void close()
+        throws IOException
+    {
+        if (mOut != null) {
+            if (mOutPtr > 0) {
+                mOut.write(mOutBuffer, 0, mOutPtr);
+                mOutPtr = 0;
+            }
+            OutputStream out = mOut;
+            mOut = null;
+
+            byte[] buf = mOutBuffer;
+            if (buf != null) {
+                mOutBuffer = null;
+                mContext.releaseWriteIOBuffer(buf);
+            }
+
+            out.close();
+
+            /* Let's 'flush' orphan surrogate, no matter what; but only
+             * after cleanly closing everything else.
+             */
+            int code = mSurrogate;
+            mSurrogate = 0;
+            if (code > 0) {
+                throwIllegal(code);
+            }
+        }
+    }
+
+    public void flush()
+        throws IOException
+    {
+        if (mOutPtr > 0) {
+            mOut.write(mOutBuffer, 0, mOutPtr);
+            mOutPtr = 0;
+        }
+        mOut.flush();
+    }
+
+    public void write(char[] cbuf)
+        throws IOException
+    {
+        write(cbuf, 0, cbuf.length);
+    }
+
+    public void write(char[] cbuf, int off, int len)
+        throws IOException
+    {
+        if (len < 2) {
+            if (len == 1) {
+                write(cbuf[off]);
+            }
+            return;
+        }
+
+        // First: do we have a leftover surrogate to deal with?
+        if (mSurrogate > 0) {
+            char second = cbuf[off++];
+            --len;
+            write(convertSurrogate(second));
+            // will have at least one more char
+        }
+
+        int outPtr = mOutPtr;
+        byte[] outBuf = mOutBuffer;
+        int outBufLast = mOutBufferLast; // has 4 'spare' bytes
+
+        // All right; can just loop it nice and easy now:
+        len += off; // len will now be the end of input buffer
+
+        output_loop:
+        for (; off < len; ) {
+            /* First, let's ensure we can output at least 4 bytes
+             * (longest UTF-8 encoded codepoint):
+             */
+            if (outPtr >= outBufLast) {
+                mOut.write(outBuf, 0, outPtr);
+                outPtr = 0;
+            }
+
+            int c = cbuf[off++];
+            // And then see if we have an Ascii char:
+            if (c < 0x80) { // If so, can do a tight inner loop:
+                outBuf[outPtr++] = (byte)c;
+                // Let's calc how many ascii chars we can copy at most:
+                int maxInCount = (len - off);
+                int maxOutCount = (outBufLast - outPtr);
+
+                if (maxInCount > maxOutCount) {
+                    maxInCount = maxOutCount;
+                }
+                maxInCount += off;
+                ascii_loop:
+                while (true) {
+                    if (off >= maxInCount) { // done with max. ascii seq
+                        continue output_loop;
+                    }
+                    c = cbuf[off++];
+                    if (c >= 0x80) {
+                        break ascii_loop;
+                    }
+                    outBuf[outPtr++] = (byte) c;
+                }
+            }
+
+            // Nope, multi-byte:
+            if (c < 0x800) { // 2-byte
+                outBuf[outPtr++] = (byte) (0xc0 | (c >> 6));
+                outBuf[outPtr++] = (byte) (0x80 | (c & 0x3f));
+            } else { // 3 or 4 bytes
+                // Surrogates?
+                if (c < SURR1_FIRST || c > SURR2_LAST) {
+                    outBuf[outPtr++] = (byte) (0xe0 | (c >> 12));
+                    outBuf[outPtr++] = (byte) (0x80 | ((c >> 6) & 0x3f));
+                    outBuf[outPtr++] = (byte) (0x80 | (c & 0x3f));
+                    continue;
+                }
+                // Yup, a surrogate:
+                if (c > SURR1_LAST) { // must be from first range
+                    mOutPtr = outPtr;
+                    throwIllegal(c);
+                }
+                mSurrogate = c;
+                // and if so, followed by another from next range
+                if (off >= len) { // unless we hit the end?
+                    break;
+                }
+                c = convertSurrogate(cbuf[off++]);
+                if (c > 0x10FFFF) { // illegal in JSON as well as in XML
+                    mOutPtr = outPtr;
+                    throwIllegal(c);
+                }
+                outBuf[outPtr++] = (byte) (0xf0 | (c >> 18));
+                outBuf[outPtr++] = (byte) (0x80 | ((c >> 12) & 0x3f));
+                outBuf[outPtr++] = (byte) (0x80 | ((c >> 6) & 0x3f));
+                outBuf[outPtr++] = (byte) (0x80 | (c & 0x3f));
+            }
+        }
+        mOutPtr = outPtr;
+    }
+    
+    public void write(int c)
+        throws IOException
+    {
+        // First; do we have a left over surrogate?
+        if (mSurrogate > 0) {
+            c = convertSurrogate(c);
+            // If not, do we start with a surrogate?
+        } else if (c >= SURR1_FIRST && c <= SURR2_LAST) {
+            // Illegal to get second part without first:
+            if (c > SURR1_LAST) {
+                throwIllegal(c);
+            }
+            // First part just needs to be held for now
+            mSurrogate = c;
+            return;
+        }
+
+        if (mOutPtr >= mOutBufferLast) { // let's require enough room, first
+            mOut.write(mOutBuffer, 0, mOutPtr);
+            mOutPtr = 0;
+        }
+
+        if (c < 0x80) { // ascii
+            mOutBuffer[mOutPtr++] = (byte) c;
+        } else {
+            int ptr = mOutPtr;
+            if (c < 0x800) { // 2-byte
+                mOutBuffer[ptr++] = (byte) (0xc0 | (c >> 6));
+                mOutBuffer[ptr++] = (byte) (0x80 | (c & 0x3f));
+            } else if (c <= 0xFFFF) { // 3 bytes
+                mOutBuffer[ptr++] = (byte) (0xe0 | (c >> 12));
+                mOutBuffer[ptr++] = (byte) (0x80 | ((c >> 6) & 0x3f));
+                mOutBuffer[ptr++] = (byte) (0x80 | (c & 0x3f));
+            } else { // 4 bytes
+                if (c > 0x10FFFF) { // illegal
+                    throwIllegal(c);
+                }
+                mOutBuffer[ptr++] = (byte) (0xf0 | (c >> 18));
+                mOutBuffer[ptr++] = (byte) (0x80 | ((c >> 12) & 0x3f));
+                mOutBuffer[ptr++] = (byte) (0x80 | ((c >> 6) & 0x3f));
+                mOutBuffer[ptr++] = (byte) (0x80 | (c & 0x3f));
+            }
+            mOutPtr = ptr;
+        }
+    }
+
+    public void write(String str)
+        throws IOException
+    {
+        write(str, 0, str.length());
+    }
+
+    public void write(String str, int off, int len) 
+        throws IOException
+    {
+        if (len < 2) {
+            if (len == 1) {
+                write(str.charAt(off));
+            }
+            return;
+        }
+
+        // First: do we have a leftover surrogate to deal with?
+        if (mSurrogate > 0) {
+            char second = str.charAt(off++);
+            --len;
+            write(convertSurrogate(second));
+            // will have at least one more char (case of 1 char was checked earlier on)
+        }
+
+        int outPtr = mOutPtr;
+        byte[] outBuf = mOutBuffer;
+        int outBufLast = mOutBufferLast; // has 4 'spare' bytes
+
+        // All right; can just loop it nice and easy now:
+        len += off; // len will now be the end of input buffer
+
+        output_loop:
+        for (; off < len; ) {
+            /* First, let's ensure we can output at least 4 bytes
+             * (longest UTF-8 encoded codepoint):
+             */
+            if (outPtr >= outBufLast) {
+                mOut.write(outBuf, 0, outPtr);
+                outPtr = 0;
+            }
+
+            int c = str.charAt(off++);
+            // And then see if we have an Ascii char:
+            if (c < 0x80) { // If so, can do a tight inner loop:
+                outBuf[outPtr++] = (byte)c;
+                // Let's calc how many ascii chars we can copy at most:
+                int maxInCount = (len - off);
+                int maxOutCount = (outBufLast - outPtr);
+
+                if (maxInCount > maxOutCount) {
+                    maxInCount = maxOutCount;
+                }
+                maxInCount += off;
+                ascii_loop:
+                while (true) {
+                    if (off >= maxInCount) { // done with max. ascii seq
+                        continue output_loop;
+                    }
+                    c = str.charAt(off++);
+                    if (c >= 0x80) {
+                        break ascii_loop;
+                    }
+                    outBuf[outPtr++] = (byte) c;
+                }
+            }
+
+            // Nope, multi-byte:
+            if (c < 0x800) { // 2-byte
+                outBuf[outPtr++] = (byte) (0xc0 | (c >> 6));
+                outBuf[outPtr++] = (byte) (0x80 | (c & 0x3f));
+            } else { // 3 or 4 bytes
+                // Surrogates?
+                if (c < SURR1_FIRST || c > SURR2_LAST) {
+                    outBuf[outPtr++] = (byte) (0xe0 | (c >> 12));
+                    outBuf[outPtr++] = (byte) (0x80 | ((c >> 6) & 0x3f));
+                    outBuf[outPtr++] = (byte) (0x80 | (c & 0x3f));
+                    continue;
+                }
+                // Yup, a surrogate:
+                if (c > SURR1_LAST) { // must be from first range
+                    mOutPtr = outPtr;
+                    throwIllegal(c);
+                }
+                mSurrogate = c;
+                // and if so, followed by another from next range
+                if (off >= len) { // unless we hit the end?
+                    break;
+                }
+                c = convertSurrogate(str.charAt(off++));
+                if (c > 0x10FFFF) { // illegal, as per RFC 4627
+                    mOutPtr = outPtr;
+                    throwIllegal(c);
+                }
+                outBuf[outPtr++] = (byte) (0xf0 | (c >> 18));
+                outBuf[outPtr++] = (byte) (0x80 | ((c >> 12) & 0x3f));
+                outBuf[outPtr++] = (byte) (0x80 | ((c >> 6) & 0x3f));
+                outBuf[outPtr++] = (byte) (0x80 | (c & 0x3f));
+            }
+        }
+        mOutPtr = outPtr;
+    }
+
+    /*
+    ////////////////////////////////////////////////////////////
+    // Internal methods
+    ////////////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method called to calculate UTF codepoint, from a surrogate pair.
+     */
+    private int convertSurrogate(int secondPart)
+        throws IOException
+    {
+        int firstPart = mSurrogate;
+        mSurrogate = 0;
+
+        // Ok, then, is the second part valid?
+        if (secondPart < SURR2_FIRST || secondPart > SURR2_LAST) {
+            throw new IOException("Broken surrogate pair: first char 0x"+Integer.toHexString(firstPart)+", second 0x"+Integer.toHexString(secondPart)+"; illegal combination");
+        }
+        return 0x10000 + ((firstPart - SURR1_FIRST) << 10) + (secondPart - SURR2_FIRST);
+    }
+
+    private void throwIllegal(int code)
+        throws IOException
+    {
+        if (code > 0x10FFFF) { // over max?
+            throw new IOException("Illegal character code point (0x"+Integer.toHexString(code)+") to output; max is 0x10FFFF as per RFC 4627");
+        }
+        if (code >= SURR1_FIRST) {
+            if (code <= SURR1_LAST) { // Unmatched first part (closing without second part?)
+                throw new IOException("Unmatched first part of surrogate pair (0x"+Integer.toHexString(code)+")");
+            }
+            throw new IOException("Unmatched second part of surrogate pair (0x"+Integer.toHexString(code)+")");
+        }
+
+        // should we ever get this?
+        throw new IOException("Illegal character code point (0x"+Integer.toHexString(code)+") to output");
+    }
+}
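+
+/* A rough usage sketch: encoding text that includes a 3-byte character and a
+ * character beyond the BMP (written as a surrogate pair) into an OutputStream.
+ * The IOContext is assumed to be provided by the surrounding factory code; the
+ * helper name and sample text are made up.
+ *
+ *   static void encodeSample(IOContext ctxt, OutputStream out)
+ *       throws IOException
+ *   {
+ *       Writer w = new UTF8Writer(ctxt, out);
+ *       w.write("price: 3\u20ac");  // U+20AC becomes a 3-byte sequence
+ *       w.write("\ud834\udd1e");    // surrogate pair, combined into one 4-byte sequence
+ *       w.close();                  // flushes any remaining buffered bytes
+ *   }
+ */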
diff --git a/src/java/org/codehaus/jackson/map/BaseMapper.java b/src/java/org/codehaus/jackson/map/BaseMapper.java
new file mode 100644
index 0000000..1aa58ef
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/BaseMapper.java
@@ -0,0 +1,67 @@
+package org.codehaus.jackson.map;
+
+import org.codehaus.jackson.*;
+
+/**
+ * This base class defines API aspects that are shared
+ * between different concrete mapper types.
+ */
+public abstract class BaseMapper
+{
+    /*
+    ////////////////////////////////////////////////////
+    // Shared public enums for configuration
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Enumeration that defines strategies available for dealing with
+     * duplicate field names (when mapping JSON to Java types).
+     */
+    public enum DupFields {
+        ERROR /* default */
+            , USE_FIRST
+            , USE_LAST
+            ;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Common config settings
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * This option defines how duplicate field names (from JSON input)
+     * are to be handled. Default is to throw a {@link JsonParseException}.
+     */
+    protected DupFields mCfgDupFields = DupFields.ERROR;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle (construction, configuration)
+    ////////////////////////////////////////////////////
+     */
+
+    public BaseMapper() { }
+
+    public void setDupFieldHandling(DupFields mode) { mCfgDupFields = mode; }
+    public DupFields getDupFieldHandling() { return mCfgDupFields; }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Methods for sub-classes
+    ////////////////////////////////////////////////////
+     */
+
+    protected void throwInternal(String msg)
+    {
+        throw new RuntimeException("Internal error: "+msg);
+    }
+
+    protected void reportProblem(JsonParser jp, String msg)
+        throws JsonParseException
+    {
+        throw new JsonParseException(msg, jp.getTokenLocation());
+    }
+}
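+
+/* A small configuration sketch. BaseMapper itself is abstract, so the example
+ * uses JavaTypeMapper (added elsewhere in this check-in) as the concrete type;
+ * the sample document in the comment is made up.
+ *
+ *   JavaTypeMapper mapper = new JavaTypeMapper();
+ *   // With the default (DupFields.ERROR), input like {"a":1,"a":2} makes the
+ *   // mapper throw a JsonParseException; USE_LAST quietly keeps the later value:
+ *   mapper.setDupFieldHandling(BaseMapper.DupFields.USE_LAST);
+ */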
diff --git a/src/java/org/codehaus/jackson/map/JavaTypeMapper.java b/src/java/org/codehaus/jackson/map/JavaTypeMapper.java
new file mode 100644
index 0000000..340a717
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/JavaTypeMapper.java
@@ -0,0 +1,536 @@
+package org.codehaus.jackson.map;
+
+import java.io.IOException;
+import java.util.*;
+
+import org.codehaus.jackson.*;
+
+/**
+ * This mapper (or codec) provides conversions between core
+ * JDK-defined Java types and matching JSON constructs.
+ * It uses instances of {@link JsonParser} and {@link JsonGenerator}
+ * for the actual reading and writing of JSON.
+ *<p>
+ * In addition to mapping to and from textual JSON using a parser and
+ * a generator, the mapper can also expose the resulting Java containers
+ * directly: either as a source of JSON events (via a parser), or as a
+ * target, so that Java objects can be constructed from calls made to a
+ * JSON generator.
+ */
+public class JavaTypeMapper
+    extends BaseMapper
+    implements JavaTypeSerializer
+{
+    /*
+    ////////////////////////////////////////////////////
+    // Public enums for configuration
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Enumeration that defines strategies available for dealing with
+     * unknown Java object types (when mapping java objects to JSON)
+     */
+    public enum UnknownType {
+        /**
+         * This option defines that if a type is not recognized a
+         * {@link JsonGenerationException} is to be thrown
+         */
+        ERROR
+            /**
+             * This option means that if a type is not recognized,
+             * the object's {@link Object#toString} method will be called
+             * and the result will be output as a JSON String.
+             */
+            ,OUTPUT_USING_TO_STRING /* default */
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Configuration settings
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Optional custom serializer, which can be called to handle
+     * Java types that the default handler can not handle.
+     * If set, it will be called for the types that the default
+     * serialization mechanism does not know how to explicitly
+     * deal with (i.e. not including the possible eventual conversion
+     * to String, as per {@link #mCfgUnknownTypes})
+     */
+    protected JavaTypeSerializer mCustomSerializer = null;
+
+    /**
+     * This defines how instances of unrecognized types (for JSON output)
+     * are to be handled. Default is to call <b>toString()</b> on such
+     * objects, and output result as String.
+     */
+    protected UnknownType mCfgUnknownTypes = UnknownType.OUTPUT_USING_TO_STRING;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle (construction, configuration)
+    ////////////////////////////////////////////////////
+     */
+
+    public JavaTypeMapper() { }
+
+    public void setCustomSerializer(JavaTypeSerializer ser) { mCustomSerializer = ser; }
+    public JavaTypeSerializer getCustomSerializer() { return mCustomSerializer; }
+
+    public void setUnkownTypeHandling(UnknownType mode) { mCfgUnknownTypes = mode; }
+    public UnknownType getUnkownTypeHandling() { return mCfgUnknownTypes; }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, root-level mapping methods,
+    // mapping from JSON to Java types
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method that will use the current event of the underlying parser
+     * (and if there's no event yet, tries to advance to an event)
+     * to construct a value, and advance the parser to point to the
+     * next event, if any. For structured tokens (objects, arrays),
+     * will recursively handle and construct contained values.
+     */
+    public Object read(JsonParser jp)
+        throws IOException, JsonParseException
+    {
+        JsonToken curr = jp.getCurrentToken();
+        if (curr == null) {
+            curr  = jp.nextToken();
+            // We hit EOF? Nothing more to do, if so:
+            if (curr == null) {
+                return null;
+            }
+        }
+        Object result = readAndMap(jp, curr);
+        /* Need to also advance the reader, if we get this far,
+         * to allow handling of root level sequence of values
+         */
+        jp.nextToken();
+        return result;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, root-level mapping methods,
+    // mapping from Java types to JSON
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     *<p>
+     * Note: method will explicitly call flush on underlying
+     * generator.
+     */
+    public final void writeAny(JsonGenerator jg, Object value)
+        throws IOException, JsonParseException
+    {
+        writeAny(this, jg, value);
+        jg.flush();
+    }
+
+    /**
+     *<p>
+     * Note: the reason for using untyped map (instead of one with
+     *  key type of String) is to
+     * allow things like Enums as keys -- anything convertible
+     * to String will be ok for us.
+     *<p>
+     * Note: method will explicitly call flush on underlying
+     * generator.
+     */
+    public final void write(JsonGenerator jg, Map<Object,Object> value)
+        throws IOException, JsonParseException
+    {
+        writeValue(this, jg, value);
+        jg.flush();
+    }
+
+    /**
+     *<p>
+     * Note: method will explicitly call flush on underlying
+     * generator.
+     */
+    public final void write(JsonGenerator jg, Collection<Object> value)
+        throws IOException, JsonParseException
+    {
+        writeValue(this, jg, value);
+        jg.flush();
+    }
+
+    /**
+     *<p>
+     * Note: method will explicitly call flush on underlying
+     * generator.
+     */
+    public final void write(JsonGenerator jg, Object[] value)
+        throws IOException, JsonParseException
+    {
+        writeValue(this, jg, value);
+        jg.flush();
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, exposing Java constructs as JSON
+    // event source via JSONParser
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method that will take in a Java object of the kind the mapper's
+     * write methods can handle, and construct a {@link JsonParser}
+     * that exposes its contents as JSON tokens.
+     */
+    public JsonParser createParserFor(Object data)
+        throws JsonParseException
+    {
+        // !!! TBI
+        return null;
+    }
+
+    /**
+     * Method that will create a JSON generator that will build
+     * Java objects as members of the current list, appending
+     * them at the end of the list.
+     */
+    public JsonGenerator createGeneratorFor(List<Object> context)
+        throws JsonParseException
+    {
+        // !!! TBI
+        return null;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // JavaTypeSerializer implementation
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Implementation of the generic write method required by
+     * {@link JavaTypeSerializer}.
+     *<p>
+     * Note: since this is not the 'root' method of mapper, it will NOT
+     * explicitly flush the underlying generator after serializing
+     * passed object.
+     */
+    @SuppressWarnings("unchecked")
+    public final boolean writeAny(JavaTypeSerializer defaultSerializer,
+                                  JsonGenerator jgen, Object value)
+        throws IOException, JsonParseException
+    {
+        if (value == null) {
+            jgen.writeNull();
+            return true;
+        }
+        // Perhaps it's one of common core JDK types?
+        KnownClasses.JdkClasses jdkType = KnownClasses.findTypeFast(value);
+        if (jdkType == null) {
+            // If not, maybe we have an auxiliary converter?
+            if (mCustomSerializer != null) {
+                if (mCustomSerializer.writeAny(defaultSerializer, jgen, value)) {
+                    return true;
+                }
+            }
+            // And if not, maybe we can further introspect the type
+            jdkType = KnownClasses.findTypeSlow(value);
+
+            if (jdkType == null) {
+                // Nope, can't figure it out. Error or toString();
+                if (mCfgUnknownTypes == UnknownType.ERROR) {
+                    throw new JsonGenerationException("Unknown type ("+value.getClass().getName()+"): don't know how to handle");
+                }
+                jgen.writeString(value.toString());
+                return true;
+            }
+        }
+
+        // Yes, there is a generic conversion available:
+        switch (jdkType) {
+        case BOOLEAN:
+            jgen.writeBoolean(((Boolean) value).booleanValue());
+            break;
+        case STRING:
+        case STRING_LIKE:
+            jgen.writeString(value.toString());
+            break;
+        case NUMBER_INTEGER:
+            jgen.writeNumber(((Number) value).intValue());
+            break;
+        case NUMBER_LONG:
+            jgen.writeNumber(((Number) value).longValue());
+            break;
+        case NUMBER_DOUBLE:
+            jgen.writeNumber(((Number) value).doubleValue());
+            break;
+        case NUMBER_OTHER:
+            /* Could try figuring out exact type etc. etc., but we
+             * are probably best off by just asking object to serialize
+             * itself and assume that's good:
+             */
+            jgen.writeRaw(value.toString());
+            break;
+
+            // // // Then array types:
+            
+        case ARRAY_LONG:
+            jgen.writeStartArray();
+            {
+                long[] values = (long[]) value;
+                for (int i = 0, len = values.length; i < len; ++i) {
+                    jgen.writeNumber(values[i]);
+                }
+            }
+            jgen.writeEndArray();
+            break;
+        case ARRAY_INT:
+            jgen.writeStartArray();
+            {
+                int[] values = (int[]) value;
+                for (int i = 0, len = values.length; i < len; ++i) {
+                    jgen.writeNumber(values[i]);
+                }
+            }
+            jgen.writeEndArray();
+            break;
+        case ARRAY_SHORT:
+            jgen.writeStartArray();
+            {
+                short[] values = (short[]) value;
+                for (int i = 0, len = values.length; i < len; ++i) {
+                    jgen.writeNumber((int) values[i]);
+                }
+            }
+            jgen.writeEndArray();
+            break;
+        case ARRAY_CHAR:
+            /* This is a peculiar type: let's assume they really want
+             * to output the String contained, instead of individual
+             * chars
+             */
+            {
+                char[] text = (char[]) value;
+                jgen.writeString(text, 0, text.length);
+            }
+            break;
+
+        case ARRAY_BYTE:
+            /* Hmmh. As with char arrays, it's not a JSON array,
+             * but binary data
+             */
+            {
+                byte[] data = (byte[]) value;
+                jgen.writeBinary(data, 0, data.length);
+            }
+            break;
+
+        case ARRAY_DOUBLE:
+            jgen.writeStartArray();
+            {
+                double[] values = (double[]) value;
+                for (int i = 0, len = values.length; i < len; ++i) {
+                    jgen.writeNumber(values[i]);
+                }
+            }
+            jgen.writeEndArray();
+            break;
+        case ARRAY_FLOAT:
+            jgen.writeStartArray();
+            {
+                float[] values = (float[]) value;
+                for (int i = 0, len = values.length; i < len; ++i) {
+                    jgen.writeNumber(values[i]);
+                }
+            }
+            jgen.writeEndArray();
+            break;
+
+        case ARRAY_BOOLEAN:
+            jgen.writeStartArray();
+            {
+                boolean[] values = (boolean[]) value;
+                for (int i = 0, len = values.length; i < len; ++i) {
+                    jgen.writeBoolean(values[i]);
+                }
+            }
+            jgen.writeEndArray();
+            break;
+
+        case ARRAY_OBJECT:
+            return writeValue(defaultSerializer, jgen, (Object[]) value);
+
+            // // // And finally java.util Collection types:
+
+        case MAP:
+            return writeValue(defaultSerializer, jgen, (Map<Object,Object>) value);
+
+        case LIST_INDEXED:
+            jgen.writeStartArray();
+            {
+                List<Object> l = (List<Object>) value;
+                for (int i = 0, len = l.size(); i < len; ++i) {
+                    writeAny(defaultSerializer, jgen, l.get(i));
+                }
+            }
+            jgen.writeEndArray();
+            break;
+            
+        case LIST_OTHER:
+        case COLLECTION:
+            return writeValue(defaultSerializer, jgen, (Collection<Object>) value);
+            
+        default: // should never get here
+            throwInternal("unhandled internal type: "+jdkType);
+        }
+
+        return true;
+    }
+
+    /**
+     * Implementation of the typed map/object write method required by
+     * {@link JavaTypeSerializer}.
+     *<p>
+     * Note: since this is not the 'root' method of mapper, it will NOT
+     * explicitly flush the underlying generator after serializing
+     * passed object.
+     */
+    public boolean writeValue(JavaTypeSerializer defaultSerializer, JsonGenerator jgen, Map<Object,Object> value)
+        throws IOException, JsonParseException
+    {
+        jgen.writeStartObject();
+        for (Map.Entry<Object,Object> me: value.entrySet()) {
+            jgen.writeFieldName(me.getKey().toString());
+            writeAny(defaultSerializer, jgen, me.getValue());
+        }
+        jgen.writeEndObject();
+        return true;
+    }
+
+    /**
+     * Implementation of the typed list/array write method required by
+     * {@link JavaTypeSerializer}.
+     *<p>
+     * Note: since this is not the 'root' method of mapper, it will NOT
+     * explicitly flush the underlying generator after serializing
+     * passed object.
+     */
+    public boolean writeValue(JavaTypeSerializer defaultSerializer, JsonGenerator jgen, Collection<Object> values)
+        throws IOException, JsonParseException
+    {
+        jgen.writeStartArray();
+        if (!values.isEmpty()) {
+            for (Object ob : values) {
+                writeAny(defaultSerializer, jgen, ob);
+            }
+        }
+        jgen.writeEndArray();
+        return true;
+    }
+
+    /**
+     * Implementation of the typed list/array write method required by
+     * {@link JavaTypeSerializer}.
+     *<p>
+     * Note: since this is not the 'root' method of mapper, it will NOT
+     * explicitly flush the underlying generator after serializing
+     * passed object.
+     */
+    public boolean writeValue(JavaTypeSerializer defaultSerializer, JsonGenerator jgen, Object[] values)
+        throws IOException, JsonParseException
+    {
+        jgen.writeStartArray();
+        for (int i = 0, len = values.length; i < len; ++i) {
+            writeAny(defaultSerializer, jgen, values[i]);
+        }
+        jgen.writeEndArray();
+        return true;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Internal methods
+    ////////////////////////////////////////////////////
+     */
+
+    protected Object readAndMap(JsonParser jp, JsonToken currToken)
+        throws IOException, JsonParseException
+    {
+        switch (currToken) {
+        case START_OBJECT:
+            {
+                LinkedHashMap<String, Object> result = new LinkedHashMap<String, Object>();
+
+                while ((currToken = jp.nextToken()) != JsonToken.END_OBJECT) {
+                    if (currToken != JsonToken.FIELD_NAME) {
+                        reportProblem(jp, "Unexpected token ("+currToken+"), expected FIELD_NAME");
+                    }
+                    String fieldName = jp.getText();
+                    Object  value = readAndMap(jp, jp.nextToken());
+
+                    if (mCfgDupFields == DupFields.ERROR) {
+                        Object old = result.put(fieldName, value);
+                        if (old != null) {
+                            reportProblem(jp, "Duplicate value for field '"+fieldName+"', when dup fields mode is "+mCfgDupFields);
+                        }
+                    } else if (mCfgDupFields == DupFields.USE_LAST) {
+                        // Easy, just add
+                        result.put(fieldName, value);
+                    } else { // use first; need to ensure we don't yet have it
+                        if (!result.containsKey(fieldName)) {
+                            result.put(fieldName, value);
+                        }
+                    }
+                }
+                return result;
+            }
+
+        case START_ARRAY:
+            {
+                ArrayList<Object> result = new ArrayList<Object>();
+                while ((currToken = jp.nextToken()) != JsonToken.END_ARRAY) {
+                    Object value = readAndMap(jp, currToken);
+                    result.add(value);
+                }
+                return result;
+            }
+
+        case VALUE_STRING:
+            return jp.getText();
+
+        case VALUE_NUMBER_INT:
+        case VALUE_NUMBER_FLOAT:
+            return jp.getNumberValue();
+
+        case VALUE_TRUE:
+            return Boolean.TRUE;
+
+        case VALUE_FALSE:
+            return Boolean.FALSE;
+
+        case VALUE_NULL:
+            return null;
+
+            /* These states can not be mapped; input stream is
+             * off by an event or two
+             */
+
+        case FIELD_NAME:
+        case END_OBJECT:
+        case END_ARRAY:
+            reportProblem(jp, "Can not map token "+currToken+": stream off by a token or two?");
+
+        default: // sanity check, should never happen
+            throwInternal("Unrecognized event type: "+currToken);
+            return null; // never gets this far
+        }
+    }
+}
+
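+/* A rough end-to-end sketch: reading JSON into untyped JDK containers and
+ * writing such a container back out. The JsonParser and JsonGenerator are
+ * assumed to come from a JsonFactory (or some other source), and the sample
+ * document mentioned in the comments is made up.
+ *
+ *   @SuppressWarnings("unchecked")
+ *   static void roundTrip(JsonParser jp, JsonGenerator jg)
+ *       throws IOException, JsonParseException
+ *   {
+ *       JavaTypeMapper mapper = new JavaTypeMapper();
+ *       // For {"name":"Bob","ids":[1,2]} read() returns a LinkedHashMap with a
+ *       // String value and an ArrayList of Numbers:
+ *       Map<Object,Object> doc = (Map<Object,Object>) mapper.read(jp);
+ *       // ... inspect or modify the map, then serialize it back out:
+ *       mapper.write(jg, doc);
+ *   }
+ */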
diff --git a/src/java/org/codehaus/jackson/map/JavaTypeSerializer.java b/src/java/org/codehaus/jackson/map/JavaTypeSerializer.java
new file mode 100644
index 0000000..159c72c
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/JavaTypeSerializer.java
@@ -0,0 +1,52 @@
+package org.codehaus.jackson.map;
+
+import java.io.IOException;
+import java.util.*;
+
+import org.codehaus.jackson.*;
+
+/**
+ * This interface defines the core API that some mappers expose, as
+ * well as what they expect helper classes to expose.
+ * Only a subset of all possible output methods is included, mostly
+ * because some simple/primitive types (Strings, numbers) are easy
+ * to output directly using a {@link JsonGenerator} instance; and
+ * for others because of limitations on overloading (List being
+ * an instance of Collection, the two can not be overloaded). Any
+ * type that is not directly or indirectly supported can still be
+ * serialized via a call to the generic {@link #writeAny} method.
+ *<p>
+ * Note about <code>defaultSerializer</code> argument: this is meant to allow
+ * specialized serializers to only handle immediate container objects,
+ * but to dispatch contents to the default handler.
+ */
+public interface JavaTypeSerializer
+{
+    /**
+     * Method that can be called to ask implementation to serialize
+     * a given value of unknown type. Implementation should either
+     * handle serialization of the value (including its members as
+     * necessary, some or all of which can be dispatched to
+     * <b>defaultSerializer</b> serializer) and return true; or return false
+     * to indicate it does not know how to serialize the value.
+     *<p>
+     * Note: implementations of these methods are not required to
+     * flush the underlying generator after writing output.
+     *
+     * @param defaultSerializer Default serializer that child serializer can
+     *    call to handle contained types. It is NOT to be called for
+     *    handling <b>value</b> itself, as that could lead to infinite
+     *    recursion.
+     */
+    public boolean writeAny(JavaTypeSerializer defaultSerializer, JsonGenerator jgen, Object value)
+        throws IOException, JsonParseException;
+
+    public boolean writeValue(JavaTypeSerializer defaultSerializer, JsonGenerator jgen, Map<Object,Object> value)
+        throws IOException, JsonParseException;
+
+    public boolean writeValue(JavaTypeSerializer defaultSerializer, JsonGenerator jgen, Collection<Object> value)
+        throws IOException, JsonParseException;
+
+    public boolean writeValue(JavaTypeSerializer defaultSerializer, JsonGenerator jgen, Object[] value)
+        throws IOException, JsonParseException;
+}
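+
+/* A sketch of a custom serializer for one application-specific type; "Point"
+ * (with public fields x, y and an arbitrary "meta" member) is a made-up class
+ * used purely for illustration. The serializer handles Point itself,
+ * dispatches the nested "meta" value to the passed-in defaultSerializer, and
+ * declines everything else by returning false.
+ *
+ *   class PointSerializer implements JavaTypeSerializer
+ *   {
+ *       public boolean writeAny(JavaTypeSerializer defaultSerializer,
+ *                               JsonGenerator jgen, Object value)
+ *           throws IOException, JsonParseException
+ *       {
+ *           if (!(value instanceof Point)) {
+ *               return false; // not ours; let the mapper handle it
+ *           }
+ *           Point p = (Point) value;
+ *           jgen.writeStartObject();
+ *           jgen.writeFieldName("x");
+ *           jgen.writeNumber(p.x);
+ *           jgen.writeFieldName("y");
+ *           jgen.writeNumber(p.y);
+ *           jgen.writeFieldName("meta");
+ *           defaultSerializer.writeAny(defaultSerializer, jgen, p.meta);
+ *           jgen.writeEndObject();
+ *           return true;
+ *       }
+ *
+ *       public boolean writeValue(JavaTypeSerializer defaultSerializer,
+ *                                 JsonGenerator jgen, Map<Object,Object> value) { return false; }
+ *
+ *       public boolean writeValue(JavaTypeSerializer defaultSerializer,
+ *                                 JsonGenerator jgen, Collection<Object> value) { return false; }
+ *
+ *       public boolean writeValue(JavaTypeSerializer defaultSerializer,
+ *                                 JsonGenerator jgen, Object[] value) { return false; }
+ *   }
+ *
+ * Such a serializer would typically be registered via JavaTypeMapper.setCustomSerializer().
+ */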
diff --git a/src/java/org/codehaus/jackson/map/JsonNode.java b/src/java/org/codehaus/jackson/map/JsonNode.java
new file mode 100644
index 0000000..1633739
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/JsonNode.java
@@ -0,0 +1,258 @@
+package org.codehaus.jackson.map;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.*;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+
+/**
+ * Base class for all JSON nodes, used with the "dynamic" (JSON type)
+ * mapper
+ */
+public abstract class JsonNode
+{
+    final static List<JsonNode> NO_NODES = Collections.emptyList();
+    final static List<String> NO_STRINGS = Collections.emptyList();
+
+    protected JsonNode() { }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, type introspection
+    ////////////////////////////////////////////////////
+     */
+
+    // // First high-level division between values, containers and "missing"
+
+    /**
+     * Method that returns true for all value nodes: ones that 
+     * are not containers, and that do not represent "missing" nodes
+     * in the path. Such value nodes represent String, Number, Boolean
+     * and null values from JSON.
+     *<p>
+     * Note: one and only one of methods {@link #isValueNode},
+     * {@link #isContainerNode} and {@link #isMissingNode} ever
+     * returns true for any given node.
+     */
+    public boolean isValueNode() { return false; }
+
+    /**
+     * Method that returns true for container nodes: Arrays and Objects.
+     *<p>
+     * Note: one and only one of methods {@link #isValueNode},
+     * {@link #isContainerNode} and {@link #isMissingNode} ever
+     * returns true for any given node.
+     */
+    public boolean isContainerNode() { return false; }
+
+    /**
+     * Method that returns true for "virtual" nodes which represent
+     * missing entries constructed by path accessor methods when
+     * there is no actual node matching given criteria.
+     *<p>
+     * Note: one and only one of methods {@link #isValueNode},
+     * {@link #isContainerNode} and {@link #isMissingNode} ever
+     * returns true for any given node.
+     */
+    public boolean isMissingNode() { return false; }
+
+    // // Then more specific type introspection
+    // // (along with defaults to be overridden)
+
+    public boolean isArray() { return false; }
+    public boolean isObject() { return false; }
+    public boolean isNumber() { return false; }
+    public boolean isIntegralNumber() { return false; }
+    public boolean isFloatingPointNumber() { return false; }
+
+    public boolean isInt() { return false; }
+    public boolean isLong() { return false; }
+    public boolean isDouble() { return false; }
+    public boolean isBigDecimal() { return false; }
+
+    public boolean isTextual() { return false; }
+    public boolean isBoolean() { return false; }
+    public boolean isNull() { return false; }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, value access
+    ////////////////////////////////////////////////////
+     */
+
+    public String getTextValue() { return null; }
+    public boolean getBooleanValue() { return false; }
+    public Number getNumberValue() { return Integer.valueOf(getIntValue()); }
+    public int getIntValue() { return 0; }
+    public long getLongValue() { return 0L; }
+    public double getDoubleValue() { return 0.0; }
+    public BigDecimal getDecimalValue() { return BigDecimal.ZERO; }
+
+    /**
+     * Method for accessing value of the specified element of
+     * an array node. If this node is not an array (or index is
+     * out of range), null will be returned.
+     *
+     * @return Node that represents the value of the specified element,
+     *   if this node is an array and has specified element.
+     *   Null otherwise.
+     */
+    public JsonNode getElementValue(int index) { return null; }
+
+    /**
+     * Method for accessing value of the specified field of
+     * an object node. If this node is not an object (or it
+     * does not have a value for specified field name), null
+     * is returned.
+     *
+     * @return Node that represents the value of the specified field,
+     *   if this node is an object and has value for the specified
+     *   field. Null otherwise.
+     */
+    public JsonNode getFieldValue(String fieldName) { return null; }
+
+    /**
+     * Method that will return a valid String representation of
+     * the node's value, if the node is a value node
+     * (method {@link #isValueNode} returns true), otherwise null.
+     *<p>
+     * Note: to serialize nodes of any type, you should call
+     * {@link #toString} instead.
+     */
+    public abstract String getValueAsText();
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, container access
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * @return For non-container nodes returns 0; for arrays number of
+     *   contained elements, and for objects number of fields.
+     */
+    public int size() { return 0; }
+
+    public Iterator<JsonNode> getElements() { return NO_NODES.iterator(); }
+
+    public Iterator<String> getFieldNames() { return NO_STRINGS.iterator(); }
+    public Iterator<JsonNode> getFieldValues() { return NO_NODES.iterator(); }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, container mutators
+    ////////////////////////////////////////////////////
+     */
+
+    public void appendElement(JsonNode node) {
+        reportNoArrayMods();
+    }
+
+    // TODO: add convenience methods (appendElement(int x) etc)
+
+    public void insertElement(int index, JsonNode value) {
+        reportNoArrayMods();
+    }
+
+    public JsonNode removeElement(int index) {
+        reportNoArrayMods();
+        return null;
+    }
+
+    public JsonNode removeElement(String fieldName) {
+        reportNoObjectMods();
+        return null;
+    }
+
+    // TODO: add convenience methods (insertElement(int x) etc)
+
+    public JsonNode setElement(int index, JsonNode value) {
+        reportNoArrayMods();
+        return null;
+    }
+
+    public JsonNode setElement(String fieldName, JsonNode value) {
+        reportNoObjectMods();
+        return null;
+    }
+
+    // TODO: add convenience methods (setElement(String, int) etc)
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, path handling
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * This method is similar to {@link #getFieldValue}, except
+     * that instead of returning null if no such value exists (due
+     * to this node not being an object, or object not having value
+     * for the specified field),
+     * a "missing node" (node that returns true for
+     * {@link #isMissingNode}) will be returned. This allows for
+     * convenient and safe chained access via path calls.
+     */
+    public abstract JsonNode getPath(String fieldName);
+
+    /**
+     * This method is similar to {@link #getElementValue}, except
+     * that instead of returning null if no such element exists (due
+     * to index being out of range, or this node not being an array),
+     * a "missing node" (node that returns true for
+     * {@link #isMissingNode}) will be returned. This allows for
+     * convenient and safe chained access via path calls.
+     */
+    public abstract JsonNode getPath(int index);
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, serialization
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method that can be called to serialize this node and
+     * all of its descendants using specified JSON generator.
+     */
+    public abstract void writeTo(JsonGenerator jg)
+        throws IOException, JsonGenerationException;
+
+    /*
+    ////////////////////////////////////////////////////
+    // Overridden standard methods
+    ////////////////////////////////////////////////////
+     */
+    
+    /**
+     * Let's mark this standard method as abstract to ensure all
+     * implementation classes define it
+     */
+    @Override
+    public abstract String toString();
+
+    /**
+     * Let's mark this standard method as abstract to ensure all
+     * implementation classes define it
+     */
+    @Override
+    public abstract boolean equals(Object o);
+
+    /*
+    ////////////////////////////////////////////////////
+    // Internal methods
+    ////////////////////////////////////////////////////
+     */
+
+    protected JsonNode reportNoArrayMods()
+    {
+        throw new UnsupportedOperationException("Node of type "+getClass()+" does not support appendElement, insertElement or setElement(int, ...) operations (only ArrayNodes do)");
+    }
+
+    protected JsonNode reportNoObjectMods()
+    {
+        throw new UnsupportedOperationException("Node of type "+getClass()+" does not support setElement(String, ...) operations (only ObjectNodes do)");
+    }
+}
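+
+/* A small navigation sketch: the getPath() variants never return null, so a
+ * chain of lookups stays safe even when parts of the expected structure are
+ * absent. The root node and the field names below are assumptions.
+ *
+ *   static String cityOf(JsonNode root)
+ *   {
+ *       // For {"address":{"city":"Oulu"}} this returns "Oulu"; if "address" or
+ *       // "city" is missing (or root is not an object), the chain ends at a
+ *       // missing node and getTextValue() simply returns null:
+ *       return root.getPath("address").getPath("city").getTextValue();
+ *   }
+ */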
diff --git a/src/java/org/codehaus/jackson/map/JsonTypeMapper.java b/src/java/org/codehaus/jackson/map/JsonTypeMapper.java
new file mode 100644
index 0000000..bc14330
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/JsonTypeMapper.java
@@ -0,0 +1,98 @@
+package org.codehaus.jackson.map;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.map.impl.JsonTypeMapperBase;
+
+/**
+ * This mapper (or codec) provides mapping between JSON content
+ * and a tree-like structure of child-linked nodes that can be
+ * traversed with simple path operations (indexing arrays by
+ * element, objects by field name).
+ *<p>
+ * The main difference to {@link JavaTypeMapper} is that no
+ * casting should ever be necessary, so access is more convenient
+ * when the expected structure is known in advance. Typing in
+ * general is simple, since the base node type suffices for
+ * all operations.
+ *<p>
+ * Note on serializing (writing) JSON: the mapper adds no specific
+ * support, since {@link JsonNode} instances already have a
+ * {@link JsonNode#writeTo} method.
+ */
+public class JsonTypeMapper
+    extends JsonTypeMapperBase
+{
+    /*
+    ////////////////////////////////////////////////////
+    // Life-cycle (construction, configuration)
+    ////////////////////////////////////////////////////
+     */
+
+    public JsonTypeMapper() { super(); }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, root-level mapping methods,
+    // mapping from JSON content to nodes
+    ////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method that will use the current event of the underlying parser
+     * (and if there is no current event, tries to advance to one)
+     * to construct a node, and then advance the parser past the value
+     * read, so that a root-level sequence of values can be read by
+     * calling this method repeatedly. For structured tokens (objects,
+     * arrays), contained nodes are handled and constructed recursively.
+     * Returns null once the end of input has been reached.
+     */
+    public JsonNode read(JsonParser jp)
+        throws IOException, JsonParseException
+    {
+        JsonToken curr = jp.getCurrentToken();
+        if (curr == null) {
+            curr  = jp.nextToken();
+            // We hit EOF? Nothing more to do, if so:
+            if (curr == null) {
+                return null;
+            }
+        }
+
+        JsonNode result = readAndMap(jp, curr);
+
+        /* Need to also advance the reader, if we get this far,
+         * to allow handling of root level sequence of values
+         */
+        jp.nextToken();
+        return result;
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Factory methods
+    ////////////////////////////////////////////////////
+     */
+
+    // Note: these come straight from the base class:
+
+    // public ArrayNode arrayNode()
+    // public ObjectNode objectNode()
+    // public NullNode nullNode()
+
+    // public TextNode textNode(String text)
+
+    // public BooleanNode booleanNode(boolean v)
+
+    // public NumericNode numberNode(int v)
+    // public NumericNode numberNode(long v)
+    // public NumericNode numberNode(double v)
+    // public NumericNode numberNode(BigDecimal v)
+
+    /*
+    ////////////////////////////////////////////////////
+    // Internal methods, overridable
+    ////////////////////////////////////////////////////
+     */
+}
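
A rough usage sketch for read(); the class name and input are hypothetical, and it assumes (per the comment above) that the parser is left positioned so that a whitespace-separated sequence of root-level values can be read in a loop until null is returned:

import java.io.StringReader;

import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.map.JsonNode;
import org.codehaus.jackson.map.JsonTypeMapper;

public class RootSequenceExample
{
    public static void main(String[] args) throws Exception
    {
        // Three independent root-level values in one stream
        JsonParser jp = new JsonFactory().createJsonParser(
            new StringReader("{\"a\":1} [2,3] \"four\""));
        JsonTypeMapper mapper = new JsonTypeMapper();

        JsonNode node;
        while ((node = mapper.read(jp)) != null) { // null signals end of input
            System.out.println(node.getClass().getSimpleName() + ": " + node);
        }
    }
}
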
diff --git a/src/java/org/codehaus/jackson/map/KnownClasses.java b/src/java/org/codehaus/jackson/map/KnownClasses.java
new file mode 100644
index 0000000..dece52a
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/KnownClasses.java
@@ -0,0 +1,145 @@
+package org.codehaus.jackson.map;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.*;
+
+/**
+ * Helper class used for fast handling of "well-known" JDK primitive
+ * wrapper and data structure classes.
+ */
+final class KnownClasses
+{
+    public enum JdkClasses {
+        BOOLEAN,
+            STRING, STRING_LIKE,
+
+            NUMBER_INTEGER, NUMBER_LONG, NUMBER_DOUBLE,
+            NUMBER_OTHER, // to be converted via 'raw'...
+
+            ARRAY_LONG, ARRAY_INT, ARRAY_SHORT, ARRAY_CHAR, ARRAY_BYTE,
+            ARRAY_DOUBLE, ARRAY_FLOAT,
+            ARRAY_BOOLEAN,
+            ARRAY_OBJECT,
+
+            MAP, LIST_INDEXED, LIST_OTHER, COLLECTION
+    };
+
+    final static HashMap<String, JdkClasses> mConcrete = 
+        new HashMap<String, JdkClasses>();
+    static {
+        // Boolean type
+        mConcrete.put(Boolean.class.getName(), JdkClasses.BOOLEAN);
+
+        /* String and string-like types (note: date types explicitly
+         * not included -- can use either textual or numeric serialization)
+         */
+        mConcrete.put(String.class.getName(), JdkClasses.STRING);
+        mConcrete.put(StringBuffer.class.getName(), JdkClasses.STRING_LIKE);
+        mConcrete.put(StringBuilder.class.getName(), JdkClasses.STRING_LIKE);
+        mConcrete.put(Character.class.getName(), JdkClasses.STRING_LIKE);
+
+        // Arrays of various types (including common object types)
+
+        mConcrete.put(new long[0].getClass().getName(), JdkClasses.ARRAY_LONG);
+        mConcrete.put(new int[0].getClass().getName(), JdkClasses.ARRAY_INT);
+        mConcrete.put(new short[0].getClass().getName(), JdkClasses.ARRAY_SHORT);
+        mConcrete.put(new char[0].getClass().getName(), JdkClasses.ARRAY_CHAR);
+        mConcrete.put(new byte[0].getClass().getName(), JdkClasses.ARRAY_BYTE);
+        mConcrete.put(new double[0].getClass().getName(), JdkClasses.ARRAY_DOUBLE);
+        mConcrete.put(new float[0].getClass().getName(), JdkClasses.ARRAY_FLOAT);
+        mConcrete.put(new boolean[0].getClass().getName(), JdkClasses.ARRAY_BOOLEAN);
+
+        mConcrete.put(new Object[0].getClass().getName(), JdkClasses.ARRAY_OBJECT);
+        mConcrete.put(new String[0].getClass().getName(), JdkClasses.ARRAY_OBJECT);
+
+        // Numbers, limited length integral
+        mConcrete.put(Byte.class.getName(), JdkClasses.NUMBER_INTEGER);
+        mConcrete.put(Short.class.getName(), JdkClasses.NUMBER_INTEGER);
+        mConcrete.put(Integer.class.getName(), JdkClasses.NUMBER_INTEGER);
+        mConcrete.put(Long.class.getName(), JdkClasses.NUMBER_LONG);
+
+        // Numbers, limited length floating point
+        mConcrete.put(Float.class.getName(), JdkClasses.NUMBER_DOUBLE);
+        mConcrete.put(Double.class.getName(), JdkClasses.NUMBER_DOUBLE);
+
+        // Numbers, more complicated
+        mConcrete.put(BigInteger.class.getName(), JdkClasses.NUMBER_OTHER);
+        mConcrete.put(BigDecimal.class.getName(), JdkClasses.NUMBER_OTHER);
+
+        // And then Java Collection classes
+        mConcrete.put(HashMap.class.getName(), JdkClasses.MAP);
+        mConcrete.put(Hashtable.class.getName(), JdkClasses.MAP);
+        mConcrete.put(LinkedHashMap.class.getName(), JdkClasses.MAP);
+        mConcrete.put(TreeMap.class.getName(), JdkClasses.MAP);
+        mConcrete.put(EnumMap.class.getName(), JdkClasses.MAP);
+        mConcrete.put(Properties.class.getName(), JdkClasses.MAP);
+
+        mConcrete.put(ArrayList.class.getName(), JdkClasses.LIST_INDEXED);
+        mConcrete.put(Vector.class.getName(), JdkClasses.LIST_INDEXED);
+        mConcrete.put(LinkedList.class.getName(), JdkClasses.LIST_OTHER);
+
+        mConcrete.put(HashSet.class.getName(), JdkClasses.COLLECTION);
+        mConcrete.put(LinkedHashSet.class.getName(), JdkClasses.COLLECTION);
+        mConcrete.put(TreeSet.class.getName(), JdkClasses.COLLECTION);
+    }
+
+    /**
+     * Quick lookup method that checks whether the concrete class
+     * happens to be one of the well-known classes. This works for
+     * exact (leaf) class types, but not for sub-classes of those
+     * types, since lookup is done by exact class name.
+     */
+    public final static JdkClasses findTypeFast(Object value)
+    {
+        return mConcrete.get(value.getClass().getName());
+    }
+
+    /**
+     * Slower fallback method that inspects the type via instanceof checks.
+     */
+    public final static JdkClasses findTypeSlow(Object value)
+    {
+        /* Some types are final, and hence not checked here (will
+         * have been handled by fast method above):
+         *
+         * - Boolean
+         * - String (StringBuffer, StringBuilder)
+         * - Arrays for primitive types
+         *
+         * But we do need to check for
+         *
+         * - Most collection types
+         * - java.lang.Number (but is that integral or not?)
+         */
+
+        if (value instanceof Map) {
+            return JdkClasses.MAP;
+        }
+        if (value instanceof Object[]) {
+            return JdkClasses.ARRAY_OBJECT;
+        }
+        if (value instanceof List) {
+            /* Could check marker interface now, to know whether
+             * to index. But let's not bother... shouldn't make
+             * big difference.
+             */
+            return JdkClasses.LIST_OTHER;
+        }
+        if (value instanceof Collection) {
+            return JdkClasses.LIST_OTHER;
+        }
+
+        if (value instanceof CharSequence) {
+            return JdkClasses.STRING_LIKE;
+        }
+
+        if (value instanceof Number) {
+            return JdkClasses.NUMBER_OTHER;
+        }
+
+        return null;
+    }
+}
+
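
The intended usage is a two-tier lookup: the exact-class map first, then the instanceof-based fallback. A hypothetical sketch (KnownClasses is package-private, so this snippet would have to live in org.codehaus.jackson.map; the class and method names below are illustrative):

package org.codehaus.jackson.map;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class KnownClassesExample
{
    static KnownClasses.JdkClasses classify(Object value)
    {
        // Fast path: exact class-name match
        KnownClasses.JdkClasses type = KnownClasses.findTypeFast(value);
        if (type == null) {
            // Slow path: interface and base class checks
            type = KnownClasses.findTypeSlow(value);
        }
        return type;
    }

    public static void main(String[] args)
    {
        System.out.println(classify("text"));                  // STRING
        System.out.println(classify(new ArrayList<Object>())); // LIST_INDEXED
        // The unmodifiable wrapper class is not in the exact-class map,
        // so the slow path falls back to the List check:
        List<Object> wrapped = Collections.unmodifiableList(new ArrayList<Object>());
        System.out.println(classify(wrapped));                 // LIST_OTHER
    }
}
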
diff --git a/src/java/org/codehaus/jackson/map/impl/ArrayNode.java b/src/java/org/codehaus/jackson/map/impl/ArrayNode.java
new file mode 100644
index 0000000..f5f393b
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/ArrayNode.java
@@ -0,0 +1,178 @@
+package org.codehaus.jackson.map.impl;
+
+import java.io.IOException;
+import java.util.*;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.map.JsonNode;
+
+/**
+ */
+public final class ArrayNode
+    extends ContainerNode
+{
+    ArrayList<JsonNode> mChildren;
+
+    protected ArrayNode() { }
+
+    @Override
+    public boolean isArray() { return true; }
+
+    @Override
+    public int size()
+    {
+        return (mChildren == null) ? 0 : mChildren.size();
+    }
+
+    @Override
+    public JsonNode getElementValue(int index)
+    {
+        if (index >= 0 && (mChildren != null) && index < mChildren.size()) {
+            return mChildren.get(index);
+        }
+        return null;
+    }
+
+    @Override
+    public JsonNode getFieldValue(String fieldName) { return null; }
+
+    @Override
+    public Iterator<JsonNode> getElements()
+    {
+        return (mChildren == null) ? NoNodesIterator.instance() : mChildren.iterator();
+    }
+
+    @Override
+    public JsonNode getPath(String fieldName) { return MissingNode.getInstance(); }
+
+    @Override
+    public JsonNode getPath(int index)
+    {
+        if (index >= 0 && (mChildren != null) && index < mChildren.size()) {
+            return mChildren.get(index);
+        }
+        return MissingNode.getInstance();
+    }
+
+    public void appendElement(JsonNode node)
+    {
+        if (mChildren == null) {
+            mChildren = new ArrayList<JsonNode>();
+        }
+        mChildren.add(node);
+    }
+
+    public void insertElement(int index, JsonNode value)
+    {
+        if (mChildren == null) {
+            mChildren = new ArrayList<JsonNode>();
+            mChildren.add(value);
+            return;
+        }
+
+        if (index < 0) {
+            mChildren.add(0, value);
+        } else if (index >= mChildren.size()) {
+            mChildren.add(value);
+        } else {
+            mChildren.add(index, value);
+        }
+    }
+
+    public JsonNode removeElement(int index)
+    {
+        if (index >= 0 && (mChildren != null) && index < mChildren.size()) {
+            return mChildren.remove(index);
+        }
+        return null;
+    }
+
+    public JsonNode removeElement(String fieldName)
+    {
+        return reportNoObjectMods();
+    }
+
+    public JsonNode setElement(int index, JsonNode value)
+    {
+        if (mChildren == null || index < 0 || index >= mChildren.size()) {
+            throw new IndexOutOfBoundsException("Illegal index "+index+", array size "+size());
+        }
+        return mChildren.set(index, value);
+    }
+
+    public JsonNode setElement(String fieldName, JsonNode value)
+    {
+        return reportNoObjectMods();
+    }
+
+    public void writeTo(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeStartArray();
+        if (mChildren != null) {
+            for (JsonNode n : mChildren) {
+                n.writeTo(jg);
+            }
+        }
+        jg.writeEndArray();
+    }
+
+    /*
+    ////////////////////////////////////////////////////////
+    // Standard methods
+    ////////////////////////////////////////////////////////
+     */
+
+    public boolean equals(Object o)
+    {
+        if (o == this) {
+            return true;
+        }
+        if (o == null || o.getClass() != getClass()) {
+            return false;
+        }
+        ArrayNode other = (ArrayNode) o;
+        if (mChildren == null) {
+            return other.mChildren == null;
+        }
+        return other.sameChildren(mChildren);
+    }
+
+    @Override
+    public String toString()
+    {
+        StringBuilder sb = new StringBuilder(16 + (size() << 4));
+        sb.append('[');
+        if (mChildren != null) {
+            for (int i = 0, len = mChildren.size(); i < len; ++i) {
+                if (i > 0) {
+                    sb.append(',');
+                }
+                sb.append(mChildren.get(i).toString());
+            }
+        }
+        sb.append(']');
+        return sb.toString();
+    }
+
+    /*
+    ////////////////////////////////////////////////////////
+    // Internal methods
+    ////////////////////////////////////////////////////////
+     */
+
+    private boolean sameChildren(ArrayList<JsonNode> otherChildren)
+    {
+        int len = otherChildren.size();
+        if (mChildren.size() != len) {
+            return false;
+        }
+        for (int i = 0; i < len; ++i) {
+            if (!mChildren.get(i).equals(otherChildren.get(i))) {
+                return false;
+            }
+        }
+        return true;
+    }
+}
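
A hypothetical sketch of the modification semantics above: insertElement() clamps out-of-range indexes to the ends of the array, while setElement() only replaces an existing slot. The node is obtained via the mapper's factory method, since the constructor is not public; the class name and values are illustrative:

import org.codehaus.jackson.map.JsonTypeMapper;
import org.codehaus.jackson.map.impl.ArrayNode;

public class ArrayNodeExample
{
    public static void main(String[] args)
    {
        JsonTypeMapper mapper = new JsonTypeMapper();
        ArrayNode array = mapper.arrayNode();

        array.appendElement(mapper.numberNode(1));
        array.insertElement(100, mapper.numberNode(3)); // index past end: appended
        array.insertElement(-5, mapper.numberNode(0));  // negative index: prepended
        array.insertElement(2, mapper.numberNode(2));   // in range: inserted at slot 2

        System.out.println(array); // [0,1,2,3]

        array.setElement(0, mapper.textNode("zero"));   // replaces an existing slot
        System.out.println(array); // ["zero",1,2,3]

        try {
            array.setElement(10, mapper.numberNode(10)); // no such slot
        } catch (IndexOutOfBoundsException e) {
            System.out.println("setElement rejects out-of-range index: " + e.getMessage());
        }
    }
}
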
diff --git a/src/java/org/codehaus/jackson/map/impl/BooleanNode.java b/src/java/org/codehaus/jackson/map/impl/BooleanNode.java
new file mode 100644
index 0000000..d122056
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/BooleanNode.java
@@ -0,0 +1,53 @@
+package org.codehaus.jackson.map.impl;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+
+/**
+ * This concrete value class is used to contain boolean (true / false)
+ * values. Only two instances are ever created, to minimize memory
+ * usage.
+ */
+public final class BooleanNode
+    extends ValueNode
+{
+    // // Just need two instances...
+
+    private final static BooleanNode sTrue = new BooleanNode();
+    private final static BooleanNode sFalse = new BooleanNode();
+
+    private BooleanNode() { }
+
+    public static BooleanNode getTrue() { return sTrue; }
+    public static BooleanNode getFalse() { return sFalse; }
+
+    public static BooleanNode valueOf(boolean b) { return b ? sTrue : sFalse; }
+
+    @Override
+    public boolean isBoolean() { return true; }
+
+    @Override
+    public boolean getBooleanValue() {
+        return (this == sTrue);
+    }
+
+    public String getValueAsText() {
+        return (this == sTrue) ? "true" : "false";
+    }
+
+    public void writeTo(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeBoolean(this == sTrue);
+    }
+
+    public boolean equals(Object o)
+    {
+        /* Since there are only ever two instances in existence
+         * can do identity comparison
+         */
+        return (o == this);
+    }
+}
diff --git a/src/java/org/codehaus/jackson/map/impl/ContainerNode.java b/src/java/org/codehaus/jackson/map/impl/ContainerNode.java
new file mode 100644
index 0000000..0875031
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/ContainerNode.java
@@ -0,0 +1,94 @@
+package org.codehaus.jackson.map.impl;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.codehaus.jackson.map.JsonNode;
+
+/**
+ * This intermediate base class is used for all container nodes,
+ * specifically, array and object nodes.
+ */
+public abstract class ContainerNode
+    extends JsonNode
+{
+    protected ContainerNode() { }
+
+    @Override
+    public boolean isContainerNode() { return true; }
+
+    @Override
+    public String getValueAsText() { return null; }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Methods reset as abstract to force real implementation
+    ////////////////////////////////////////////////////
+     */
+
+    public abstract int size();
+
+    public abstract JsonNode getElementValue(int index);
+
+    public abstract JsonNode getFieldValue(String fieldName);
+
+    public abstract void appendElement(JsonNode node);
+
+    public abstract void insertElement(int index, JsonNode value);
+
+    public abstract JsonNode removeElement(int index);
+
+    public abstract JsonNode removeElement(String fieldName);
+
+    public abstract JsonNode setElement(int index, JsonNode value);
+
+    public abstract JsonNode setElement(String fieldName, JsonNode value);
+
+    /*
+    ////////////////////////////////////////////////////
+    // Implementations of convenience methods
+    ////////////////////////////////////////////////////
+     */
+
+    /*
+    ////////////////////////////////////////////////////
+    // Helper classes
+    ////////////////////////////////////////////////////
+     */
+
+    protected static class NoNodesIterator
+        implements Iterator<JsonNode>
+    {
+        final static NoNodesIterator sInstance = new NoNodesIterator();
+
+        private NoNodesIterator() { }
+
+        public static NoNodesIterator instance() { return sInstance; }
+
+        public boolean hasNext() { return false; }
+        public JsonNode next() { throw new NoSuchElementException(); }
+
+        public void remove() {
+            // could as well throw UnsupportedOperationException?
+            throw new IllegalStateException();
+        }
+    }
+
+    protected static class NoStringsIterator
+        implements Iterator<String>
+    {
+        final static NoStringsIterator sInstance = new NoStringsIterator();
+
+        private NoStringsIterator() { }
+
+        public static NoStringsIterator instance() { return sInstance; }
+
+        public boolean hasNext() { return false; }
+        public String next() { throw new NoSuchElementException(); }
+
+        public void remove() {
+            // could as well throw UnsupportedOperationException?
+            throw new IllegalStateException();
+        }
+    }
+}
diff --git a/src/java/org/codehaus/jackson/map/impl/DecimalNode.java b/src/java/org/codehaus/jackson/map/impl/DecimalNode.java
new file mode 100644
index 0000000..d10b426
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/DecimalNode.java
@@ -0,0 +1,63 @@
+package org.codehaus.jackson.map.impl;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+
+/**
+ * Numeric node that contains values that do not fit in simple
+ * integer (int, long) or floating point (double) values.
+ */
+public final class DecimalNode
+    extends NumericNode
+{
+    final BigDecimal mValue;
+
+    public DecimalNode(BigDecimal v) { mValue = v; }
+
+    public static DecimalNode valueOf(BigDecimal d) { return new DecimalNode(d); }
+
+    @Override
+    public boolean isFloatingPointNumber() { return true; }
+
+    @Override
+    public boolean isBigDecimal() { return true; }
+
+    @Override
+    public Number getNumberValue() { return mValue; }
+
+    @Override
+    public int getIntValue() { return mValue.intValue(); }
+
+    @Override
+    public long getLongValue() { return mValue.longValue(); }
+
+    @Override
+    public double getDoubleValue() { return mValue.doubleValue(); }
+
+    @Override
+    public BigDecimal getDecimalValue() { return mValue; }
+
+    public String getValueAsText() {
+        return mValue.toString();
+    }
+
+    public void writeTo(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeNumber(mValue);
+    }
+
+    public boolean equals(Object o)
+    {
+        if (o == this) {
+            return true;
+        }
+        if (o == null || o.getClass() != getClass()) { // final class, can do this
+            return false;
+        }
+        return ((DecimalNode) o).mValue.equals(mValue);
+    }
+}
diff --git a/src/java/org/codehaus/jackson/map/impl/DoubleNode.java b/src/java/org/codehaus/jackson/map/impl/DoubleNode.java
new file mode 100644
index 0000000..53eb880
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/DoubleNode.java
@@ -0,0 +1,66 @@
+package org.codehaus.jackson.map.impl;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.io.NumberOutput;
+
+/**
+ * Numeric node that contains 64-bit ("double precision")
+ * floating point values.
+ */
+public final class DoubleNode
+    extends NumericNode
+{
+    final double mValue;
+
+    public DoubleNode(double v) { mValue = v; }
+
+    public static DoubleNode valueOf(double v) { return new DoubleNode(v); }
+
+    @Override
+    public boolean isFloatingPointNumber() { return true; }
+
+    @Override
+    public boolean isDouble() { return true; }
+
+    @Override
+    public Number getNumberValue() {
+        return Double.valueOf(mValue);
+    }
+
+    @Override
+    public int getIntValue() { return (int) mValue; }
+
+    @Override
+    public long getLongValue() { return (long) mValue; }
+
+    @Override
+    public double getDoubleValue() { return mValue; }
+
+    @Override
+    public BigDecimal getDecimalValue() { return BigDecimal.valueOf(mValue); }
+
+    public String getValueAsText() {
+        return NumberOutput.toString(mValue);
+    }
+
+    public void writeTo(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeNumber(mValue);
+    }
+
+    public boolean equals(Object o)
+    {
+        if (o == this) {
+            return true;
+        }
+        if (o == null || o.getClass() != getClass()) { // final class, can do this
+            return false;
+        }
+        return ((DoubleNode) o).mValue == mValue;
+    }
+}
diff --git a/src/java/org/codehaus/jackson/map/impl/IntNode.java b/src/java/org/codehaus/jackson/map/impl/IntNode.java
new file mode 100644
index 0000000..bbb807e
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/IntNode.java
@@ -0,0 +1,65 @@
+package org.codehaus.jackson.map.impl;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.io.NumberOutput;
+
+/**
+ * Numeric node that contains simple 32-bit integer values.
+ */
+public final class IntNode
+    extends NumericNode
+{
+    final int mValue;
+
+    public IntNode(int v) { mValue = v; }
+
+    public static IntNode valueOf(int i) { return new IntNode(i); }
+
+    @Override
+    public boolean isIntegralNumber() { return true; }
+
+    @Override
+    public boolean isInt() { return true; }
+
+    @Override
+    public Number getNumberValue() {
+        return Integer.valueOf(mValue);
+    }
+
+    @Override
+    public int getIntValue() { return mValue; }
+
+    @Override
+    public long getLongValue() { return (long) mValue; }
+
+    @Override
+    public double getDoubleValue() { return (double) mValue; }
+
+    @Override
+    public BigDecimal getDecimalValue() { return BigDecimal.valueOf(mValue); }
+
+    public String getValueAsText() {
+        return NumberOutput.toString(mValue);
+    }
+
+    public void writeTo(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeNumber(mValue);
+    }
+
+    public boolean equals(Object o)
+    {
+        if (o == this) {
+            return true;
+        }
+        if (o == null || o.getClass() != getClass()) { // final class, can do this
+            return false;
+        }
+        return ((IntNode) o).mValue == mValue;
+    }
+}
diff --git a/src/java/org/codehaus/jackson/map/impl/JsonTypeMapperBase.java b/src/java/org/codehaus/jackson/map/impl/JsonTypeMapperBase.java
new file mode 100644
index 0000000..18b6356
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/JsonTypeMapperBase.java
@@ -0,0 +1,126 @@
+package org.codehaus.jackson.map.impl;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.map.JsonNode;
+import org.codehaus.jackson.map.BaseMapper;
+
+/**
+ * This intermediate base class is needed to access non-public
+ * (package) interface of node implementations during building.
+ */
+public abstract class JsonTypeMapperBase
+    extends BaseMapper
+{
+    protected JsonTypeMapperBase() { }
+
+    /*
+    /////////////////////////////////////////////////////
+    // Actual factory methods exposed and used by the mapper
+    // (can also be overridden by sub-classes for extra
+    // functionality)
+    /////////////////////////////////////////////////////
+     */
+
+    public ArrayNode arrayNode() { return new ArrayNode(); }
+    public ObjectNode objectNode() { return new ObjectNode(); }
+    public NullNode nullNode() { return NullNode.getInstance(); }
+
+    public TextNode textNode(String text) { return TextNode.valueOf(text); }
+
+    public BooleanNode booleanNode(boolean v) {
+        return v ? BooleanNode.getTrue() : BooleanNode.getFalse();
+    }
+
+    public NumericNode numberNode(int v) { return IntNode.valueOf(v); }
+    public NumericNode numberNode(long v) { return LongNode.valueOf(v); }
+    public NumericNode numberNode(double v) { return DoubleNode.valueOf(v); }
+    public NumericNode numberNode(BigDecimal v) { return DecimalNode.valueOf(v); }
+
+    /*
+    /////////////////////////////////////////////////////
+    // Mapping functionality
+    /////////////////////////////////////////////////////
+     */
+
+    protected JsonNode readAndMap(JsonParser jp, JsonToken currToken)
+        throws IOException, JsonParseException
+    {
+        switch (currToken) {
+        case START_OBJECT:
+            {
+                ObjectNode node = objectNode();
+                while ((currToken = jp.nextToken()) != JsonToken.END_OBJECT) {
+                    if (currToken != JsonToken.FIELD_NAME) {
+                        reportProblem(jp, "Unexpected token ("+currToken+"), expected FIELD_NAME");
+                    }
+                    String fieldName = jp.getText();
+                    JsonNode value = readAndMap(jp, jp.nextToken());
+
+                    if (mCfgDupFields == DupFields.ERROR) {
+                        JsonNode old = node.setElement(fieldName, value);
+                        if (old != null) {
+                            reportProblem(jp, "Duplicate value for field '"+fieldName+"', when dup fields mode is "+mCfgDupFields);
+                        }
+                    } else if (mCfgDupFields == DupFields.USE_LAST) {
+                        // Easy, just add
+                        node.setElement(fieldName, value);
+                    } else { // use first; need to ensure we don't yet have it
+                        if (node.getFieldValue(fieldName) == null) {
+                            node.setElement(fieldName, value);
+                        }
+                    }
+                }
+                return node;
+            }
+
+        case START_ARRAY:
+            {
+                ArrayNode node = arrayNode();
+                while ((currToken = jp.nextToken()) != JsonToken.END_ARRAY) {
+                    JsonNode value = readAndMap(jp, currToken);
+                    node.appendElement(value);
+                }
+                return node;
+            }
+
+        case VALUE_STRING:
+            return textNode(jp.getText());
+
+        case VALUE_NUMBER_INT:
+            if (jp.getNumberType() == JsonParser.NumberType.INT) {
+                return numberNode(jp.getIntValue());
+            }
+            return numberNode(jp.getLongValue());
+
+        case VALUE_NUMBER_FLOAT:
+            /* !!! Should we try to see if we should use some
+             *  other representation (BigDecimal)?
+             */
+            return numberNode(jp.getDoubleValue());
+
+        case VALUE_TRUE:
+            return booleanNode(true);
+
+        case VALUE_FALSE:
+            return booleanNode(false);
+
+        case VALUE_NULL:
+            return nullNode();
+
+            // These states can not be mapped; input stream is
+            // off by an event or two
+
+        case FIELD_NAME:
+        case END_OBJECT:
+        case END_ARRAY:
+            reportProblem(jp, "Can not map token "+currToken+": stream off by a token or two?");
+
+        default: // sanity check, should never happen
+            throwInternal("Unrecognized event type: "+currToken);
+            return null; // never gets this far
+        }
+    }
+}
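
A rough sketch (hypothetical class name and data) that builds a tree with the factory methods above and compares it, via the node-level equals() implementations, to the tree produced by readAndMap() for equivalent JSON text:

import java.io.StringReader;

import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.map.JsonNode;
import org.codehaus.jackson.map.JsonTypeMapper;
import org.codehaus.jackson.map.impl.ArrayNode;
import org.codehaus.jackson.map.impl.ObjectNode;

public class FactoryMethodsExample
{
    public static void main(String[] args) throws Exception
    {
        JsonTypeMapper mapper = new JsonTypeMapper();

        // Hand-built tree, using the inherited factory methods
        ObjectNode built = mapper.objectNode();
        built.setElement("name", mapper.textNode("jackson"));
        built.setElement("stable", mapper.booleanNode(false));
        ArrayNode versions = mapper.arrayNode();
        versions.appendElement(mapper.numberNode(1));
        built.setElement("versions", versions);

        // Equivalent tree mapped from JSON text
        JsonNode parsed = mapper.read(new JsonFactory().createJsonParser(
            new StringReader("{\"name\":\"jackson\",\"stable\":false,\"versions\":[1]}")));

        System.out.println(built.equals(parsed)); // true
    }
}
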
diff --git a/src/java/org/codehaus/jackson/map/impl/LongNode.java b/src/java/org/codehaus/jackson/map/impl/LongNode.java
new file mode 100644
index 0000000..393b7a1
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/LongNode.java
@@ -0,0 +1,65 @@
+package org.codehaus.jackson.map.impl;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.io.NumberOutput;
+
+/**
+ * Numeric node that contains simple 64-bit integer values.
+ */
+public final class LongNode
+    extends NumericNode
+{
+    final long mValue;
+
+    public LongNode(long v) { mValue = v; }
+
+    public static LongNode valueOf(long l) { return new LongNode(l); }
+
+    @Override
+    public boolean isIntegralNumber() { return true; }
+
+    @Override
+    public boolean isLong() { return true; }
+
+    @Override
+    public Number getNumberValue() {
+        return Long.valueOf(mValue);
+    }
+
+    @Override
+    public int getIntValue() { return (int) mValue; }
+
+    @Override
+    public long getLongValue() { return mValue; }
+
+    @Override
+    public double getDoubleValue() { return (double) mValue; }
+
+    @Override
+    public BigDecimal getDecimalValue() { return BigDecimal.valueOf(mValue); }
+
+    public String getValueAsText() {
+        return NumberOutput.toString(mValue);
+    }
+
+    public void writeTo(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeNumber(mValue);
+    }
+
+    public boolean equals(Object o)
+    {
+        if (o == this) {
+            return true;
+        }
+        if (o == null || o.getClass() != getClass()) { // final class, can do this
+            return false;
+        }
+        return ((LongNode) o).mValue == mValue;
+    }
+}
diff --git a/src/java/org/codehaus/jackson/map/impl/MissingNode.java b/src/java/org/codehaus/jackson/map/impl/MissingNode.java
new file mode 100644
index 0000000..0f0f0ea
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/MissingNode.java
@@ -0,0 +1,65 @@
+package org.codehaus.jackson.map.impl;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.map.JsonNode;
+
+/**
+ * This singleton node class is used to denote "missing nodes"
+ * along paths that do not exist. For example, if a path step
+ * indexes an array element outside the range of the array, or
+ * indexes into a non-array value, the result is a reference
+ * to this node.
+ */
+public final class MissingNode
+    extends JsonNode
+{
+    private final static MissingNode sInstance = new MissingNode();
+
+    private MissingNode() { }
+
+    public static MissingNode getInstance() { return sInstance; }
+
+    @Override
+    public boolean isMissingNode() { return true; }
+
+    @Override
+    public String getValueAsText() { return null; }
+
+    @Override
+    public JsonNode getPath(String fieldName) { return this; }
+
+    @Override
+    public JsonNode getPath(int index) { return this; }
+
+    public void writeTo(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        /* Nothing to output... should we signal an error tho?
+         * Chances are, this is an erroneous call. For now, let's
+         * not do that.
+         */
+    }
+
+    public boolean equals(Object o)
+    {
+        /* Hmmh. Since there's just a singleton instance, this
+         * fails in all cases but with identity comparison.
+         * However: if this placeholder value was to be considered
+         * similar to Sql NULL, it shouldn't even equal itself?
+         * That might cause problems when dealing with collections
+         * like Sets... so for now, let's let identity comparison
+         * return true.
+         */
+        return (o == this);
+    }
+
+    @Override
+    public String toString()
+    {
+        // toString() should never return null
+        return "";
+    }
+}
diff --git a/src/java/org/codehaus/jackson/map/impl/NullNode.java b/src/java/org/codehaus/jackson/map/impl/NullNode.java
new file mode 100644
index 0000000..9172a5c
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/NullNode.java
@@ -0,0 +1,40 @@
+package org.codehaus.jackson.map.impl;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+
+/**
+ * This singleton value class is used to contain the explicit JSON null
+ * value.
+ */
+public final class NullNode
+    extends ValueNode
+{
+    // // Just need the one singleton instance...
+
+    private final static NullNode sNull = new NullNode();
+
+    private NullNode() { }
+
+    public static NullNode getInstance() { return sNull; }
+
+    @Override
+    public boolean isNull() { return true; }
+
+    public String getValueAsText() {
+        return "null";
+    }
+
+    public void writeTo(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeNull();
+    }
+
+    public boolean equals(Object o)
+    {
+        return (o == this);
+    }
+}
diff --git a/src/java/org/codehaus/jackson/map/impl/NumericNode.java b/src/java/org/codehaus/jackson/map/impl/NumericNode.java
new file mode 100644
index 0000000..a9fbe59
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/NumericNode.java
@@ -0,0 +1,25 @@
+package org.codehaus.jackson.map.impl;
+
+import java.math.BigDecimal;
+
+/**
+ * Intermediate value node used for numeric nodes.
+ */
+public abstract class NumericNode
+    extends ValueNode
+{
+    protected NumericNode() { }
+
+    @Override
+    public final boolean isNumber() { return true; }
+
+    // // // Let's re-abstract so sub-classes handle them
+
+    public abstract Number getNumberValue();
+    public abstract int getIntValue();
+    public abstract long getLongValue();
+    public abstract double getDoubleValue();
+    public abstract BigDecimal getDecimalValue();
+
+    public abstract String getValueAsText();
+}
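
A small sketch of the coercing accessors re-declared above; every concrete numeric node answers all of them, truncating where needed (class name and values are illustrative):

import org.codehaus.jackson.map.impl.DoubleNode;
import org.codehaus.jackson.map.impl.IntNode;
import org.codehaus.jackson.map.impl.LongNode;
import org.codehaus.jackson.map.impl.NumericNode;

public class NumericCoercionExample
{
    public static void main(String[] args)
    {
        NumericNode[] nodes = {
            IntNode.valueOf(42),
            LongNode.valueOf(10000000000L),
            DoubleNode.valueOf(2.75)    // getIntValue()/getLongValue() truncate to 2
        };
        for (NumericNode n : nodes) {
            System.out.println(n.getValueAsText()
                + " -> int=" + n.getIntValue()
                + ", long=" + n.getLongValue()
                + ", double=" + n.getDoubleValue()
                + ", decimal=" + n.getDecimalValue());
        }
    }
}
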
diff --git a/src/java/org/codehaus/jackson/map/impl/ObjectNode.java b/src/java/org/codehaus/jackson/map/impl/ObjectNode.java
new file mode 100644
index 0000000..bd692dc
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/ObjectNode.java
@@ -0,0 +1,171 @@
+package org.codehaus.jackson.map.impl;
+
+import java.io.IOException;
+import java.util.*;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.map.JsonNode;
+
+/**
+ */
+public final class ObjectNode
+    extends ContainerNode
+{
+    LinkedHashMap<String, JsonNode> mChildren = null;
+
+    protected ObjectNode() { }
+
+    @Override
+    public boolean isObject() { return true; }
+
+    @Override
+    public int size() {
+        return (mChildren == null) ? 0 : mChildren.size();
+    }
+
+    @Override
+    public JsonNode getElementValue(int index) { return null; }
+
+    @Override
+    public JsonNode getFieldValue(String fieldName)
+    {
+        if (mChildren != null) {
+            return mChildren.get(fieldName);
+        }
+        return null;
+    }
+
+    @Override
+    public Iterator<String> getFieldNames()
+    {
+        return (mChildren == null) ? NoStringsIterator.instance() : mChildren.keySet().iterator();
+    }
+
+    @Override
+    public Iterator<JsonNode> getFieldValues()
+    {
+        return (mChildren == null) ? NoNodesIterator.instance() : mChildren.values().iterator();
+    }
+
+    @Override
+    public JsonNode getPath(int index)
+    {
+        return MissingNode.getInstance();
+    }
+
+    @Override
+    public JsonNode getPath(String fieldName)
+    {
+        if (mChildren != null) {
+            JsonNode n = mChildren.get(fieldName);
+            if (n != null) {
+                return n;
+            }
+        }
+        return MissingNode.getInstance();
+    }
+
+    public void appendElement(JsonNode node)
+    {
+        reportNoArrayMods();
+    }
+
+    public void insertElement(int index, JsonNode value)
+    {
+        reportNoArrayMods();
+    }
+
+    public JsonNode removeElement(int index)
+    {
+        return reportNoArrayMods();
+    }
+
+    public JsonNode removeElement(String fieldName)
+    {
+        if (mChildren != null) {
+            return mChildren.remove(fieldName);
+        }
+        return null;
+    }
+
+    public JsonNode setElement(int index, JsonNode value)
+    {
+        return reportNoArrayMods();
+    }
+
+    public JsonNode setElement(String fieldName, JsonNode value)
+    {
+        if (mChildren == null) {
+            mChildren = new LinkedHashMap<String, JsonNode>();
+        }
+        return mChildren.put(fieldName, value);
+    }
+
+    public void writeTo(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeStartObject();
+        if (mChildren != null) {
+            for (Map.Entry<String, JsonNode> en : mChildren.entrySet()) {
+                jg.writeFieldName(en.getKey());
+                en.getValue().writeTo(jg);
+            }
+        }
+        jg.writeEndObject();
+    }
+
+    /*
+    ////////////////////////////////////////////////////////
+    // Standard methods
+    ////////////////////////////////////////////////////////
+     */
+
+    public boolean equals(Object o)
+    {
+        if (o == this) {
+            return true;
+        }
+        if (o == null || o.getClass() != getClass()) {
+            return false;
+        }
+        ObjectNode other = (ObjectNode) o;
+        if (other.size() != size()) {
+            return false;
+        }
+        if (mChildren != null) {
+            for (Map.Entry<String, JsonNode> en : mChildren.entrySet()) {
+                String key = en.getKey();
+                JsonNode value = en.getValue();
+
+                JsonNode otherValue = other.getFieldValue(key);
+
+                if (otherValue == null || !otherValue.equals(value)) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public String toString()
+    {
+        StringBuilder sb = new StringBuilder(32 + (size() << 4));
+        sb.append("{");
+        if (mChildren != null) {
+            int count = 0;
+            for (Map.Entry<String, JsonNode> en : mChildren.entrySet()) {
+                if (count > 0) {
+                    sb.append(",");
+                }
+                ++count;
+                TextNode.appendQuoted(sb, en.getKey());
+                sb.append(':');
+                sb.append(en.getValue().toString());
+            }
+        }
+        sb.append("}");
+        return sb.toString();
+    }
+}
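
A hypothetical sketch of the mutation semantics above: setElement() returns the value previously bound to the field (or null), and field order is preserved because children are kept in a LinkedHashMap. The class name and data are illustrative:

import org.codehaus.jackson.map.JsonNode;
import org.codehaus.jackson.map.JsonTypeMapper;
import org.codehaus.jackson.map.impl.ObjectNode;

public class ObjectNodeExample
{
    public static void main(String[] args)
    {
        JsonTypeMapper mapper = new JsonTypeMapper();
        ObjectNode obj = mapper.objectNode();

        System.out.println(obj.setElement("id", mapper.numberNode(1))); // null (no previous value)
        System.out.println(obj.setElement("id", mapper.numberNode(2))); // 1 (the replaced node)
        obj.setElement("name", mapper.textNode("first"));

        System.out.println(obj); // {"id":2,"name":"first"} -- insertion order kept

        JsonNode removed = obj.removeElement("name");
        System.out.println(removed);    // "first"
        System.out.println(obj.size()); // 1
    }
}
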
diff --git a/src/java/org/codehaus/jackson/map/impl/TextNode.java b/src/java/org/codehaus/jackson/map/impl/TextNode.java
new file mode 100644
index 0000000..2e02db6
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/TextNode.java
@@ -0,0 +1,80 @@
+package org.codehaus.jackson.map.impl;
+
+import java.io.IOException;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonGenerator;
+import org.codehaus.jackson.util.CharTypes;
+
+/**
+ * Value node that contains a text value.
+ */
+public final class TextNode
+    extends ValueNode
+{
+    final static TextNode EMPTY_STRING_NODE = new TextNode("");
+
+    final String mValue;
+
+    public TextNode(String v) { mValue = v; }
+
+    public static TextNode valueOf(String v)
+    {
+        if (v == null) {
+            return null;
+        }
+        if (v.length() == 0) {
+            return EMPTY_STRING_NODE;
+        }
+        return new TextNode(v);
+    }
+
+    @Override
+    public boolean isTextual() { return true; }
+
+    @Override
+    public String getTextValue() {
+        return mValue;
+    }
+
+    public String getValueAsText() {
+        return mValue;
+    }
+
+    public void writeTo(JsonGenerator jg)
+        throws IOException, JsonGenerationException
+    {
+        jg.writeString(mValue);
+    }
+
+    public boolean equals(Object o)
+    {
+        if (o == this) {
+            return true;
+        }
+        if (o == null || o.getClass() != getClass()) { // final class, can do this
+            return false;
+        }
+        // Compare by content, not by identity: distinct String instances
+        // with the same characters must yield equal nodes
+        String otherValue = ((TextNode) o).mValue;
+        return (otherValue == mValue) || (otherValue != null && otherValue.equals(mValue));
+    }
+
+    /**
+     * Unlike other value nodes, Strings need quoting
+     */
+    @Override
+    public String toString()
+    {
+        int len = mValue.length();
+        len = len + 2 + (len >> 4);
+        StringBuilder sb = new StringBuilder(len);
+        appendQuoted(sb, mValue);
+        return sb.toString();
+    }
+
+    protected static void appendQuoted(StringBuilder sb, String content)
+    {
+        sb.append('"');
+        CharTypes.appendQuoted(sb, content);
+        sb.append('"');
+    }
+}
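
A small sketch of the quoting note above: getValueAsText() returns the raw string, while toString() goes through appendQuoted() and yields a double-quoted form (class name is illustrative):

import org.codehaus.jackson.map.impl.TextNode;

public class TextNodeQuotingExample
{
    public static void main(String[] args)
    {
        TextNode t = TextNode.valueOf("hello");
        System.out.println(t.getValueAsText()); // hello
        System.out.println(t);                  // "hello" (with quotes)
    }
}
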
diff --git a/src/java/org/codehaus/jackson/map/impl/ValueNode.java b/src/java/org/codehaus/jackson/map/impl/ValueNode.java
new file mode 100644
index 0000000..ffe05ae
--- /dev/null
+++ b/src/java/org/codehaus/jackson/map/impl/ValueNode.java
@@ -0,0 +1,38 @@
+package org.codehaus.jackson.map.impl;
+
+import org.codehaus.jackson.map.JsonNode;
+
+/**
+ * This intermediate base class is used for all leaf nodes, that is,
+ * all non-container (array or object) nodes, except for the
+ * "missing node".
+ */
+public abstract class ValueNode
+    extends JsonNode
+{
+    protected ValueNode() { }
+
+    @Override
+    public boolean isValueNode() { return true; }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, path handling
+    ////////////////////////////////////////////////////
+     */
+
+    @Override
+    public JsonNode getPath(String fieldName) { return MissingNode.getInstance(); }
+
+    @Override
+    public JsonNode getPath(int index) { return MissingNode.getInstance(); }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Base impls for standard methods
+    ////////////////////////////////////////////////////
+     */
+
+    @Override
+    public String toString() { return getValueAsText(); }
+}
diff --git a/src/java/org/codehaus/jackson/sym/Name.java b/src/java/org/codehaus/jackson/sym/Name.java
new file mode 100644
index 0000000..7d257bb
--- /dev/null
+++ b/src/java/org/codehaus/jackson/sym/Name.java
@@ -0,0 +1,59 @@
+package org.codehaus.jackson.sym;
+
+/**
+ * Base class for tokenized names (key strings in objects) that have
+ * been tokenized from byte-based input sources (like
+ * {@link java.io.InputStream}).
+ *
+ * @author Tatu Saloranta
+ */
+public abstract class Name
+{
+    protected final String mName;
+
+    protected final int mHashCode;
+
+    protected Name(String name, int hashCode) {
+        mName = name;
+        mHashCode = hashCode;
+    }
+
+    public String getName() { return mName; }
+
+    /*
+    //////////////////////////////////////////////////////////
+    // Methods for package/core parser
+    //////////////////////////////////////////////////////////
+     */
+
+    public abstract boolean equals(int quad1, int quad2);
+
+    public abstract boolean equals(int[] quads, int qlen);
+
+    public abstract int sizeInQuads();
+
+    public abstract int getFirstQuad();
+
+    public abstract int getQuad(int index);
+
+    public abstract int getLastQuad();
+
+    /*
+    //////////////////////////////////////////////////////////
+    // Overridden standard methods
+    //////////////////////////////////////////////////////////
+     */
+
+    @Override
+    public String toString() { return mName; }
+
+    @Override
+    public final int hashCode() { return mHashCode; }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        // Canonical instances, can usually just do identity comparison
+        return (o == this);
+    }
+}
diff --git a/src/java/org/codehaus/jackson/sym/Name1.java b/src/java/org/codehaus/jackson/sym/Name1.java
new file mode 100644
index 0000000..20d2803
--- /dev/null
+++ b/src/java/org/codehaus/jackson/sym/Name1.java
@@ -0,0 +1,46 @@
+package org.codehaus.jackson.sym;
+
+/**
+ * Specialized implementation of {@link Name}: can be used for short names
+ * that consist of at most 4 bytes. Usually this means short
+ * ascii-only names.
+ *<p>
+ * The reason for such specialized classes is mostly space efficiency,
+ * and to a lesser degree performance. Both are achieved for short
+ * names by avoiding the extra level of indirection of a quad array.
+ */
+public final class Name1
+    extends Name
+{
+    final int mQuad;
+
+    Name1(String name, int hash, int quad)
+    {
+        super(name, hash);
+        mQuad = quad;
+    }
+
+    public boolean equals(int quad1, int quad2)
+    {
+        return (quad1 == mQuad) && (quad2 == 0);
+    }
+
+    public boolean equals(int[] quads, int qlen)
+    {
+        return (qlen == 1 && quads[0] == mQuad);
+    }
+
+    public int getFirstQuad() {
+        return mQuad;
+    }
+
+    public int getLastQuad() {
+        return mQuad;
+    }
+
+    public int getQuad(int index) {
+        return (index == 0) ? mQuad : 0;
+    }
+
+    public int sizeInQuads() { return 1; }
+}
diff --git a/src/java/org/codehaus/jackson/sym/Name2.java b/src/java/org/codehaus/jackson/sym/Name2.java
new file mode 100644
index 0000000..201799b
--- /dev/null
+++ b/src/java/org/codehaus/jackson/sym/Name2.java
@@ -0,0 +1,52 @@
+package org.codehaus.jackson.sym;
+
+/**
+ * Specialized implementation of {@link Name}: can be used for short names
+ * that consist of 5 to 8 bytes. Usually this means relatively short
+ * ascii-only names.
+ *<p>
+ * The reason for such specialized classes is mostly space efficiency,
+ * and to a lesser degree performance. Both are achieved for short
+ * names by avoiding the extra level of indirection of a quad array.
+ */
+public final class Name2
+    extends Name
+{
+    final int mQuad1;
+
+    final int mQuad2;
+
+    Name2(String name, int hash, int quad1, int quad2)
+    {
+        super(name, hash);
+        mQuad1 = quad1;
+        mQuad2 = quad2;
+    }
+
+    public boolean equals(int quad1, int quad2)
+    {
+        return (quad1 == mQuad1) && (quad2 == mQuad2);
+    }
+
+    public boolean equals(int[] quads, int qlen)
+    {
+        return (qlen == 2 && quads[0] == mQuad1 && quads[1] == mQuad2);
+    }
+
+    public int getFirstQuad() {
+        return mQuad1;
+    }
+
+    public int getLastQuad() {
+        return mQuad2;
+    }
+
+    public int getQuad(int index) {
+        /* Note: should never be called with an illegal index; hence
+         * value return is arbitrary for indexes other than 0 or 1
+         */
+        return (index == 0) ? mQuad1 : mQuad2;
+    }
+
+    public int sizeInQuads() { return 2; }
+}
diff --git a/src/java/org/codehaus/jackson/sym/Name3.java b/src/java/org/codehaus/jackson/sym/Name3.java
new file mode 100644
index 0000000..aaa5fd0
--- /dev/null
+++ b/src/java/org/codehaus/jackson/sym/Name3.java
@@ -0,0 +1,54 @@
+package org.codehaus.jackson.sym;
+
+/**
+ * Specialized implementation of {@link Name}: can be used for short names
+ * that consist of 9 to 12 bytes. It is the longest special-purpose
+ * implementation; longer names are expressed using {@link NameN}.
+ */
+public final class Name3
+    extends Name
+{
+    final int mQuad1;
+    final int mQuad2;
+    final int mQuad3;
+
+    Name3(String name, int hash, int q1, int q2, int q3)
+    {
+        super(name, hash);
+        mQuad1 = q1;
+        mQuad2 = q2;
+        mQuad3 = q3;
+    }
+
+    public boolean equals(int quad1, int quad2)
+    {
+        // Implies quad length < 3, never matches
+        return false;
+    }
+
+    public boolean equals(int[] quads, int qlen)
+    {
+        return (qlen == 3)
+            && (quads[0] == mQuad1)
+            && (quads[1] == mQuad2)
+            && (quads[2] == mQuad3);
+    }
+
+    public int getFirstQuad() {
+        return mQuad1;
+    }
+
+    public int getLastQuad() {
+        return mQuad3;
+    }
+
+    public int getQuad(int index) {
+        if (index < 2) {
+            return (index == 0) ? mQuad1 : mQuad2;
+        }
+        // Whatever would be returned for invalid index is arbitrary, so:
+        return mQuad3;
+    }
+
+    public int sizeInQuads() { return 3; }
+}
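
To make the "quad" terminology of Name1 through Name3 (and of NameCanonicalizer below) concrete, here is a hypothetical packing helper: each int holds up to 4 bytes of the encoded name, with a partial quad right-aligned (zero most-significant bytes) as the canonicalize() javadoc describes. The byte order within a full quad is an assumption of this sketch, not something the commit spells out:

import java.io.UnsupportedEncodingException;
import java.util.Arrays;

public class QuadPackingSketch
{
    // Packs the UTF-8 bytes of a name into int "quads", 4 bytes per int;
    // the last, possibly partial quad ends up right-aligned.
    static int[] toQuads(String name) throws UnsupportedEncodingException
    {
        byte[] bytes = name.getBytes("UTF-8");
        int[] quads = new int[(bytes.length + 3) / 4];
        for (int i = 0; i < bytes.length; ++i) {
            quads[i / 4] = (quads[i / 4] << 8) | (bytes[i] & 0xFF);
        }
        return quads;
    }

    public static void main(String[] args) throws Exception
    {
        System.out.println(Arrays.toString(toQuads("id")));          // 1 quad  -> Name1-sized
        System.out.println(Arrays.toString(toQuads("language")));    // 2 quads -> Name2-sized
        System.out.println(Arrays.toString(toQuads("description"))); // 3 quads -> Name3-sized
    }
}
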
diff --git a/src/java/org/codehaus/jackson/sym/NameCanonicalizer.java b/src/java/org/codehaus/jackson/sym/NameCanonicalizer.java
new file mode 100644
index 0000000..b42fcc1
--- /dev/null
+++ b/src/java/org/codehaus/jackson/sym/NameCanonicalizer.java
@@ -0,0 +1,750 @@
+package org.codehaus.jackson.sym;
+
+/**
+ * This class is basically a caching symbol table implementation used for
+ * canonicalizing {@link Name}s, constructed directly from a byte-based
+ * input source.
+ *
+ * @author Tatu Saloranta
+ */
+public final class NameCanonicalizer
+{
+    final static int MIN_HASH_SIZE = 16;
+
+    final static int INITIAL_COLLISION_LEN = 32;
+
+    /**
+     * Bucket index is 8 bits, and value 0 is reserved to represent
+     * 'empty' status.
+     */
+    final static int LAST_VALID_BUCKET = 0xFE;
+
+    /*
+    /////////////////////////////////////////////////////
+    // Main table state
+    /////////////////////////////////////////////////////    
+     */
+
+    // // // First, global information
+
+    /**
+     * Total number of Names in the symbol table
+     */
+    private int mCount;
+
+    // // // Then information regarding primary hash array and its
+    // // // matching Name array
+
+    /**
+     * Mask used to truncate 32-bit hash value to current hash array
+     * size; essentially, hash array size - 1 (since hash array sizes
+     * are 2^N).
+     */
+    private int mMainHashMask;
+
+    /**
+     * Array of 2^N size, which contains combination
+     * of 24-bits of hash (0 to indicate 'empty' slot),
+     * and 8-bit collision bucket index (0 to indicate empty
+     * collision bucket chain; otherwise subtract one from index)
+     */
+    private int[] mMainHash;
+
+    /**
+     * Array that contains <code>Name</code> instances matching
+     * entries in <code>mMainHash</code>. Contains nulls for unused
+     * entries.
+     */
+    private Name[] mMainNames;
+
+    // // // Then the collision/spill-over area info
+
+    /**
+     * Array of heads of collision bucket chains; size dynamically
+     */
+    private Bucket[] mCollList;
+
+    /**
+     * Total number of Names in collision buckets (included in
+     * <code>mCount</code> along with primary entries)
+     */
+    private int mCollCount;
+
+    /**
+     * Index of the first unused collision bucket entry (== size of
+     * the used portion of collision list): less than
+     * or equal to 0xFF (255), since max number of entries is 255
+     * (8-bit, minus 0 used as 'empty' marker)
+     */
+    private int mCollEnd;
+
+    // // // Info regarding pending rehashing...
+
+    /**
+     * This flag is set if, after adding a new entry, it is deemed
+     * that a rehash is warranted if any more entries are to be added.
+     */
+    private transient boolean mNeedRehash;
+
+    /*
+    /////////////////////////////////////////////////////
+    // Sharing, versioning
+    /////////////////////////////////////////////////////    
+     */
+
+    // // // Which of the buffers may be shared (and are copy-on-write)?
+
+    /**
+     * Flag that indicates whether underlying data structures for
+     * the main hash area are shared or not. If they are, then they
+     * need to be handled in copy-on-write way, i.e. if they need
+     * to be modified, a copy needs to be made first; at this point
+     * it will not be shared any more, and can be modified.
+     *<p>
+     * This flag needs to be checked both when adding new main entries,
+     * and when adding new collision list queues (i.e. creating a new
+     * collision list head entry)
+     */
+    private boolean mMainHashShared;
+
+    private boolean mMainNamesShared;
+
+    /**
+     * Flag that indicates whether underlying data structures for
+     * the collision list are shared or not. If they are, then they
+     * need to be handled in copy-on-write way, i.e. if they need
+     * to be modified, a copy needs to be made first; at this point
+     * it will not be shared any more, and can be modified.
+     *<p>
+     * This flag needs to be checked when adding new collision entries.
+     */
+    private boolean mCollListShared;
+
+    /*
+    /////////////////////////////////////////////////////
+    // Construction, merging
+    /////////////////////////////////////////////////////
+     */
+
+    public NameCanonicalizer(int hashSize)
+    {
+        /* Sanity check: let's not allow hash sizes below a certain
+         * minimum value
+         */
+        if (hashSize < MIN_HASH_SIZE) {
+            hashSize = MIN_HASH_SIZE;
+        } else {
+            /* Also, size must be 2^N; otherwise the hash algorithm won't
+             * work... so let's just pad it up, if necessary
+             */
+            if ((hashSize & (hashSize - 1)) != 0) { // true only if size is not 2^N
+                int curr = MIN_HASH_SIZE;
+                while (curr < hashSize) {
+                    curr += curr;
+                }
+                //System.out.println("WARNING: hashSize "+hashSize+" illegal; padding up to "+curr);
+                hashSize = curr;
+            }
+        }
+
+        mCount = 0;
+        mMainHashShared = false;
+        mMainNamesShared = false;
+        mMainHashMask = hashSize - 1;
+        mMainHash = new int[hashSize];
+        mMainNames = new Name[hashSize];
+
+        mCollListShared = true; // just since it'll need to be allocated
+        mCollList = null;
+        mCollEnd = 0;
+
+        mNeedRehash = false;
+    }
+
+    /**
+     * Constructor used when creating a child instance
+     */
+    NameCanonicalizer(NameCanonicalizer parent)
+    {
+        // First, let's copy the state as is:
+        mCount = parent.mCount;
+        mMainHashMask = parent.mMainHashMask;
+        mMainHash = parent.mMainHash;
+        mMainNames = parent.mMainNames;
+        mCollList = parent.mCollList;
+        mCollCount = parent.mCollCount;
+        mCollEnd = parent.mCollEnd;
+        mNeedRehash = false;
+
+        // And consider all shared, so far:
+        mMainHashShared = true;
+        mMainNamesShared = true;
+        mCollListShared = true;
+    }
+
+    public boolean mergeFromChild(NameCanonicalizer child)
+    {
+        // Only makes sense if child has more entries
+        if (child.mCount <= mCount) {
+            return false;
+        }
+
+        mCount = child.mCount;
+        mMainHashMask = child.mMainHashMask;
+        mMainHash = child.mMainHash;
+        mMainNames = child.mMainNames;
+        mCollList = child.mCollList;
+        mCollCount = child.mCollCount;
+        mCollEnd = child.mCollEnd;
+
+        /* Plus, as an added safety measure, let's mark child buffers
+         * as shared, just in case it might still be used:
+         */
+        child.markAsShared();
+        return true;
+    }
+
+    public void markAsShared()
+    {
+        mMainHashShared = true;
+        mMainNamesShared = true;
+        mCollListShared = true;
+    }
+
+    /**
+     * Method used by test code to reset the state of the name table.
+     */
+    public void nuke() {
+        mMainHash = null;
+        mMainNames = null;
+        mCollList = null;
+    }
+
+    /*
+    /////////////////////////////////////////////////////
+    // API, accessors
+    /////////////////////////////////////////////////////
+     */
+
+    public int size() { return mCount; }
+
+    /**
+     * Method called to quickly check whether this symbol table may have
+     * gotten additional entries; used to decide whether a child table
+     * should be merged back into the shared table.
+     */
+    public boolean maybeDirty()
+    {
+        return !mMainHashShared;
+    }
+
+    /**
+     * Finds and returns the name matching the specified symbol, if such
+     * a name already exists in the table; returns null if not, in which
+     * case the caller can construct and add it via addSymbol.
+     *<p>
+     * Note: separate methods exist to optimize the common case of
+     * relatively short element/attribute names (8 or fewer ascii
+     * characters).
+     *
+     * @param firstQuad int32 containing the first 4 bytes of the name;
+     *   if the whole name is less than 4 bytes, padded with zero bytes
+     *   in front (zero MSBs, i.e. right aligned)
+     * @param secondQuad int32 containing bytes 5 through 8 of the
+     *   name; if the name is less than 8 bytes, padded with up to 4 zero
+     *   bytes in front (zero MSBs, i.e. right aligned)
+     *
+     * @return Name matching the symbol passed, or null if none found
+     */
+    public Name canonicalize(int hash, int firstQuad, int secondQuad)
+    {
+        
+        int ix = (hash & mMainHashMask);
+        int val = mMainHash[ix];
+        
+        /* High 24 bits of the value are low 24 bits of hash (low 8 bits
+         * are bucket index)... match?
+         */
+        if ((((val >> 8) ^ hash) << 8) == 0) { // match
+            // Ok, but do we have an actual match?
+            Name pname = mMainNames[ix];
+            if (pname == null) { // main slot empty; can't find
+                return null;
+            }
+            if (pname.equals(firstQuad, secondQuad)) {
+                return pname;
+            }
+        } else if (val == 0) { // empty slot? no match
+            return null;
+        }
+        // Maybe a spill-over?
+        val &= 0xFF;
+        if (val > 0) { // 0 means 'empty'
+            val -= 1; // to convert from 1-based to 0...
+            Bucket bucket = mCollList[val];
+            if (bucket != null) {
+                return bucket.find(hash, firstQuad, secondQuad);
+            }
+        }
+        // Nope, no match whatsoever
+        return null;
+    }
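+
+    /* Usage sketch (illustration only): how a caller would typically
+     * combine the short-name lookup above with addSymbol(). The 'table'
+     * variable is assumed to be a NameCanonicalizer obtained elsewhere;
+     * quad packing and hashing use the static helpers defined below.
+     *
+     *   byte[] nameBytes = { 'i', 'd' }; // UTF-8 bytes of the name "id"
+     *   int q1 = NameCanonicalizer.calcQuads(nameBytes)[0]; // 0x6964, right-aligned
+     *   int hash = NameCanonicalizer.calcHash(q1);
+     *   Name n = table.canonicalize(hash, q1, 0);
+     *   if (n == null) { // not seen before: construct and add
+     *       n = table.addSymbol(hash, "id", q1, 0);
+     *   }
+     */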
+
+    /**
+     * Finds and returns the name matching the specified symbol, if such
+     * a name already exists in the table; returns null if not, in which
+     * case the caller can construct and add it via addSymbol.
+     *<p>
+     * Note: this is the general purpose method that can be called for
+     * names of any length. However, if the name is less than 9 bytes long,
+     * it is preferable to call the version optimized for short
+     * names.
+     *
+     * @param quads Array of int32s, each of which contains 4 bytes of the
+     *   encoded name
+     * @param qlen Number of int32s, starting from index 0, in the quads
+     *   parameter
+     *
+     * @return Name matching the symbol passed, or null if none found
+     */
+    public Name canonicalize(int hash, int[] quads, int qlen)
+    {
+        if (qlen < 3) { // another sanity check
+            return canonicalize(hash, quads[0], (qlen < 2) ? 0 : quads[1]);
+        }
+        // (for rest of comments regarding logic, see method above)
+        int ix = (hash & mMainHashMask);
+        int val = mMainHash[ix];
+        if ((((val >> 8) ^ hash) << 8) == 0) {
+            Name pname = mMainNames[ix];
+            if (pname == null) { // main slot empty; no collision list then either
+                return null;
+            }
+            if (pname.equals(quads, qlen)) { // should be match, let's verify
+                return pname;
+            }
+        } else if (val == 0) { // empty slot? no match
+            return null;
+        }
+        val &= 0xFF;
+        if (val > 0) { // 0 means 'empty'
+            val -= 1; // to convert from 1-based to 0...
+            Bucket bucket = mCollList[val];
+            if (bucket != null) {
+                return bucket.find(hash, quads, qlen);
+            }
+        }
+        return null;
+    }
+
+    /*
+    /////////////////////////////////////////////////////
+    // API, mutators
+    /////////////////////////////////////////////////////
+     */
+
+    public Name addSymbol(int hash, String symbolStr, int firstQuad, int secondQuad)
+    {
+        Name symbol = NameFactory.construct(hash, symbolStr, firstQuad, secondQuad);
+        doAddSymbol(hash, symbol);
+        return symbol;
+    }
+
+    public Name addSymbol(int hash, String symbolStr, int[] quads, int qlen)
+    {
+        Name symbol = NameFactory.construct(hash, symbolStr, quads, qlen);
+        doAddSymbol(hash, symbol);
+        return symbol;
+    }
+
+    /*
+    /////////////////////////////////////////////////////
+    // Helper methods
+    /////////////////////////////////////////////////////
+     */
+
+    public final static int calcHash(int firstQuad)
+    {
+        int hash = firstQuad * 31;
+        hash ^= (hash >>> 16); // to xor hi- and low- 16-bits
+        hash ^= (hash >>> 8); // as well as lowest 2 bytes
+        return hash;
+    }
+
+    public final static int calcHash(int firstQuad, int secondQuad)
+    {
+        int hash = (firstQuad * 31) + secondQuad;
+        hash ^= (hash >>> 16); // to xor hi- and low- 16-bits
+        hash ^= (hash >>> 8); // as well as lowest 2 bytes
+        return hash;
+    }
+
+    public final static int calcHash(int[] quads, int qlen)
+    {
+        int hash = quads[0];
+        for (int i = 1; i < qlen; ++i) {
+            hash = (hash * 31) + quads[i];
+        }
+        hash ^= (hash >>> 16); // to xor hi- and low- 16-bits
+        hash ^= (hash >>> 8); // as well as lowest 2 bytes
+        return hash;
+    }
+
+    public static int[] calcQuads(byte[] wordBytes)
+    {
+        int blen = wordBytes.length;
+        int[] result = new int[(blen + 3) / 4];
+        for (int i = 0; i < blen; ++i) {
+            int x = wordBytes[i] & 0xFF;
+
+            if (++i < blen) {
+                x = (x << 8) | (wordBytes[i] & 0xFF);
+                if (++i < blen) {
+                    x = (x << 8) | (wordBytes[i] & 0xFF);
+                    if (++i < blen) {
+                        x = (x << 8) | (wordBytes[i] & 0xFF);
+                    }
+                }
+            }
+            result[i >> 2] = x;
+        }
+        return result;
+    }
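+
+    /* Worked example (illustration only) of the packing above, for the
+     * 5-byte ascii name "count" (0x63 0x6F 0x75 0x6E 0x74):
+     *
+     *   calcQuads(...) returns { 0x636F756E, 0x00000074 }
+     *
+     * i.e. 'c','o','u','n' packed big-endian into the first quad, and the
+     * trailing 't' right-aligned (zero MSBs) in the last, partial quad;
+     * the matching hash would then be calcHash(quads, 2).
+     */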
+
+    /*
+    /////////////////////////////////////////////////////
+    // Standard methods
+    /////////////////////////////////////////////////////
+     */
+
+    public String toString()
+    {
+        StringBuilder sb = new StringBuilder();
+        sb.append("[NameCanonicalizer, size: ");
+        sb.append(mCount);
+        sb.append('/');
+        sb.append(mMainHash.length);
+        sb.append(", ");
+        sb.append(mCollCount);
+        sb.append(" coll; avg length: ");
+
+        /* Average length: minimum of 1 for all (1 == primary hit);
+         * and then 1 per each traversal for collisions/buckets
+         */
+        //int maxDist = 1;
+        int pathCount = mCount;
+        for (int i = 0; i < mCollEnd; ++i) {
+            int spillLen = mCollList[i].length();
+            for (int j = 1; j <= spillLen; ++j) {
+                pathCount += j;
+            }
+        }
+        double avgLength;
+
+        if (mCount == 0) {
+            avgLength = 0.0;
+        } else {
+            avgLength = (double) pathCount / (double) mCount;
+        }
+        // let's round it a bit (to 2 decimal places)
+        //avgLength -= (avgLength % 0.01);
+
+        sb.append(avgLength);
+        sb.append(']');
+        return sb.toString();
+    }
+
+    /*
+    /////////////////////////////////////////////////////
+    // Internal methods
+    /////////////////////////////////////////////////////
+     */
+
+    private void doAddSymbol(int hash, Name symbol)
+    {
+        if (mMainHashShared) { // always have to modify main entry
+            unshareMain();
+        }
+        // First, do we need to rehash?
+        if (mNeedRehash) {
+            rehash();
+        }
+
+        ++mCount;
+        /* Ok, setup done: now we need to find the slot to add the
+         * symbol to:
+         */
+        int ix = (hash & mMainHashMask);
+        if (mMainNames[ix] == null) { // primary empty?
+            mMainHash[ix] = (hash << 8);
+            if (mMainNamesShared) {
+                unshareNames();
+            }
+            mMainNames[ix] = symbol;
+        } else { // nope, it's a collision, need to spill over
+            /* How about spill-over area... do we already know the bucket
+             * (is the case if it's not the first collision)
+             */
+            if (mCollListShared) {
+                unshareCollision(); // also allocates if list was null
+            }
+            ++mCollCount;
+            int entryValue = mMainHash[ix];
+            int bucket = entryValue & 0xFF;
+            if (bucket == 0) { // first spill over?
+                if (mCollEnd <= LAST_VALID_BUCKET) { // yup, still unshared bucket
+                    bucket = mCollEnd;
+                    ++mCollEnd;
+                    // need to expand?
+                    if (bucket >= mCollList.length) {
+                        expandCollision();
+                    }
+                } else { // nope, have to share... let's find shortest?
+                    bucket = findBestBucket();
+                }
+                // Need to mark the entry... and the spill index is 1-based
+                mMainHash[ix] = (entryValue & ~0xFF) | (bucket + 1);
+            } else {
+                --bucket; // 1-based index in value
+            }
+            
+            // And then just need to link the new bucket entry in
+            mCollList[bucket] = new Bucket(symbol, mCollList[bucket]);
+        }
+
+        /* Ok. Now, do we need a rehash next time? Rehashing is only
+         * considered once the fill rate exceeds 50%:
+         */
+        {
+            int hashSize = mMainHash.length;
+            if (mCount > (hashSize >> 1)) {
+                int hashQuarter = (hashSize >> 2);
+                /* And either strictly above 75% (the usual) or
+                 * just 50%, and collision count >= 25% of total hash size
+                 */
+                if (mCount > (hashSize - hashQuarter)) {
+                    mNeedRehash = true;
+                } else if (mCollCount >= hashQuarter) {
+                    mNeedRehash = true;
+                }
+            }
+        }
+    }
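+
+    /* For concreteness (illustration only): with a main hash array of 64
+     * entries, the check above sets mNeedRehash once mCount exceeds 48
+     * (75% fill), or once mCount exceeds 32 while mCollCount has reached
+     * at least 16 (25% of the hash size).
+     */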
+
+    private void rehash()
+    {
+        mNeedRehash = false;
+        // Note: since we'll make copies, no need to unshare, can just mark as such:
+        mMainNamesShared = false;
+        mCollListShared = false;
+
+        /* And then we can first deal with the main hash area. Since we
+         * are expanding linearly (double up), we know there'll be no
+         * collisions during this phase.
+         */
+        int symbolsSeen = 0; // let's do a sanity check
+        int[] oldMainHash = mMainHash;
+        int len = oldMainHash.length;
+        mMainHash = new int[len + len];
+        mMainHashMask = (len + len - 1);
+        Name[] oldNames = mMainNames;
+        mMainNames = new Name[len + len];
+        for (int i = 0; i < len; ++i) {
+            Name symbol = oldNames[i];
+            if (symbol != null) {
+                ++symbolsSeen;
+                int hash = symbol.hashCode();
+                int ix = (hash & mMainHashMask);
+                mMainNames[ix] = symbol;
+                mMainHash[ix] = hash << 8; // will clear spill index
+            }
+        }
+
+        /* And then the spill area. This may cause collisions, although
+         * not necessarily as many as there were earlier. Let's allocate
+         * same amount of space, however
+         */
+        int oldEnd = mCollEnd;
+        if (oldEnd == 0) { // no prior collisions...
+            return;
+        }
+
+        mCollCount = 0;
+        mCollEnd = 0;
+
+        Bucket[] oldBuckets = mCollList;
+        mCollList = new Bucket[oldBuckets.length];
+        for (int i = 0; i < oldEnd; ++i) {
+            for (Bucket curr = oldBuckets[i]; curr != null; curr = curr.mNext) {
+                ++symbolsSeen;
+                Name symbol = curr.mName;
+                int hash = symbol.hashCode();
+                int ix = (hash & mMainHashMask);
+                int val = mMainHash[ix];
+                if (mMainNames[ix] == null) { // no primary entry?
+                    mMainHash[ix] = (hash << 8);
+                    mMainNames[ix] = symbol;
+                } else { // nope, it's a collision, need to spill over
+                    ++mCollCount;
+                    int bucket = val & 0xFF;
+                    if (bucket == 0) { // first spill over?
+                        if (mCollEnd <= LAST_VALID_BUCKET) { // yup, still unshared bucket
+                            bucket = mCollEnd;
+                            ++mCollEnd;
+                            // need to expand?
+                            if (bucket >= mCollList.length) {
+                                expandCollision();
+                            }
+                        } else { // nope, have to share... let's find shortest?
+                            bucket = findBestBucket();
+                        }
+                        // Need to mark the entry... and the spill index is 1-based
+                        mMainHash[ix] = (val & ~0xFF) | (bucket + 1);
+                    } else {
+                        --bucket; // 1-based index in value
+                    }
+                    // And then just need to link the new bucket entry in
+                    mCollList[bucket] = new Bucket(symbol, mCollList[bucket]);
+                }
+            } // for (... buckets in the chain ...)
+        } // for (... list of bucket heads ... )
+
+        if (symbolsSeen != mCount) { // sanity check
+            throw new Error("Internal error: count after rehash "+symbolsSeen+"; should be "+mCount);
+        }
+    }
+
+    /**
+     * Method called to find the best bucket to spill a Name over to:
+     * usually the first bucket that has only one entry, but in general
+     * first one of the buckets with least number of entries
+     */
+    private int findBestBucket()
+    {
+        Bucket[] buckets = mCollList;
+        int bestCount = Integer.MAX_VALUE;
+        int bestIx = -1;
+
+        for (int i = 0, len = mCollEnd; i < len; ++i) {
+            int count = buckets[i].length();
+            if (count < bestCount) {
+                if (count == 1) { // best possible
+                    return i;
+                }
+                bestCount = count;
+                bestIx = i;
+            }
+        }
+        return bestIx;
+    }
+
+    /**
+     * Method that needs to be called, if the main hash structure
+     * is (may be) shared. This happens every time something is added,
+     * even if addition is to the collision list (since collision list
+     * index comes from lowest 8 bits of the primary hash entry)
+     */
+    private void unshareMain()
+    {
+        int[] old = mMainHash;
+        int len = mMainHash.length;
+
+        mMainHash = new int[len];
+        System.arraycopy(old, 0, mMainHash, 0, len);
+        mMainHashShared = false;
+    }
+
+    private void unshareCollision()
+    {
+        Bucket[] old = mCollList;
+        if (old == null) {
+            mCollList = new Bucket[INITIAL_COLLISION_LEN];
+        } else {
+            int len = old.length;
+            mCollList = new Bucket[len];
+            System.arraycopy(old, 0, mCollList, 0, len);
+        }
+        mCollListShared = false;
+    }
+
+    private void unshareNames()
+    {
+        Name[] old = mMainNames;
+        int len = old.length;
+        mMainNames = new Name[len];
+        System.arraycopy(old, 0, mMainNames, 0, len);
+        mMainNamesShared = false;
+    }
+
+    private void expandCollision()
+    {
+        Bucket[] old = mCollList;
+        int len = old.length;
+        mCollList = new Bucket[len+len];
+        System.arraycopy(old, 0, mCollList, 0, len);
+    }
+
+    /*
+    /////////////////////////////////////////////////////
+    // Helper classes
+    /////////////////////////////////////////////////////
+     */
+
+    final static class Bucket
+    {
+        final Name mName;
+        final Bucket mNext;
+
+        Bucket(Name name, Bucket next)
+        {
+            mName = name;
+            mNext = next;
+        }
+
+        public int length()
+        {
+            int len = 1;
+            for (Bucket curr = mNext; curr != null; curr = curr.mNext) {
+                ++len;
+            }
+            return len;
+        }
+
+        public Name find(int hash, int firstQuad, int secondQuad)
+        {
+            if (mName.hashCode() == hash) {
+                if (mName.equals(firstQuad, secondQuad)) {
+                    return mName;
+                }
+            }
+            for (Bucket curr = mNext; curr != null; curr = curr.mNext) {
+                Name currName = curr.mName;
+                if (currName.hashCode() == hash) {
+                    if (currName.equals(firstQuad, secondQuad)) {
+                        return currName;
+                    }
+                }
+            }
+            return null;
+        }
+
+        public Name find(int hash, int[] quads, int qlen)
+        {
+            if (mName.hashCode() == hash) {
+                if (mName.equals(quads, qlen)) {
+                    return mName;
+                }
+            }
+            for (Bucket curr = mNext; curr != null; curr = curr.mNext) {
+                Name currName = curr.mName;
+                if (currName.hashCode() == hash) {
+                    if (currName.equals(quads, qlen)) {
+                        return currName;
+                    }
+                }
+            }
+            return null;
+        }
+    }
+}
diff --git a/src/java/org/codehaus/jackson/sym/NameFactory.java b/src/java/org/codehaus/jackson/sym/NameFactory.java
new file mode 100644
index 0000000..862188f
--- /dev/null
+++ b/src/java/org/codehaus/jackson/sym/NameFactory.java
@@ -0,0 +1,47 @@
+package org.codehaus.jackson.sym;
+
+/**
+ * Simple factory that can instantiate appropriate {@link Name}
+ * instances, given input data to use for construction. The main reason
+ * for a factory class here is just to insulate calling code from having
+ * to know details of concrete implementations.
+ */
+public final class NameFactory
+{
+    private NameFactory() { }
+
+    /*
+    //////////////////////////////////////////////////////////
+    // Public API
+    //////////////////////////////////////////////////////////
+     */
+
+    public static Name construct(int hash, String name, int q1, int q2)
+    {
+        name = name.intern();
+        if (q2 == 0) { // one quad only?
+            return new Name1(name, hash, q1);
+        }
+        return new Name2(name, hash, q1, q2);
+    }
+
+    public static Name construct(int hash, String name, int[] quads, int qlen)
+    {
+        name = name.intern();
+        if (qlen < 4) { // Need to check for 3 quad one, can do others too
+            if (qlen == 3) {
+                return new Name3(name, hash, quads[0], quads[1], quads[2]);
+            }
+            if (qlen == 2) {
+                return new Name2(name, hash, quads[0], quads[1]);
+            }
+            return new Name1(name, hash, quads[0]);
+        }
+        // Otherwise, need to copy the incoming buffer
+        int[] buf = new int[qlen];
+        for (int i = 0; i < qlen; ++i) {
+            buf[i] = quads[i];
+        }
+        return new NameN(name, hash, buf, qlen);
+    }
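+
+    /* Usage sketch (illustration only): given a hash and the packed quads
+     * of a name (both assumed to come from NameCanonicalizer's
+     * calcQuads()/calcHash() helpers), the factory picks the most compact
+     * Name implementation:
+     *
+     *   int[] quads = { 0x636F756E, 0x74 }; // packed bytes of "count"
+     *   int hash = NameCanonicalizer.calcHash(quads, 2);
+     *   Name name = NameFactory.construct(hash, "count", quads, 2);
+     *   // qlen is 2 here, so a Name2 instance is returned;
+     *   // 4 or more quads would yield a NameN instead
+     */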
+}
diff --git a/src/java/org/codehaus/jackson/sym/NameN.java b/src/java/org/codehaus/jackson/sym/NameN.java
new file mode 100644
index 0000000..3ef246f
--- /dev/null
+++ b/src/java/org/codehaus/jackson/sym/NameN.java
@@ -0,0 +1,61 @@
+package org.codehaus.jackson.sym;
+
+/**
+ * Generic implementation of {@link Name} used for symbols that are
+ * longer than 12 bytes, i.e. need more than three int32 quads to
+ * represent; shorter names use the specialized Name1, Name2 and Name3
+ * implementations.
+ */
+public final class NameN
+    extends Name
+{
+    final int[] mQuads;
+    final int mQuadLen;
+
+    NameN(String name, int hash, int[] quads, int quadLen)
+    {
+        super(name, hash);
+        mQuads = quads;
+        mQuadLen = quadLen;
+    }
+
+    public boolean equals(int quad1, int quad2)
+    {
+        // Unlikely to match... but:
+        if (mQuadLen < 3) {
+            if (mQuadLen == 1) {
+                return (mQuads[0] == quad1) && (quad2 == 0);
+            }
+            return (mQuads[0] == quad1) && (mQuads[1] == quad2);
+        }
+        return false;
+    }
+
+    public boolean equals(int[] quads, int qlen)
+    {
+        if (qlen == mQuadLen) {
+            for (int i = 0; i < qlen; ++i) {
+                if (quads[i] != mQuads[i]) {
+                    return false;
+                }
+            }
+            return true;
+        }
+        return false;
+    }
+
+    public int getFirstQuad() {
+        return mQuads[0];
+    }
+
+    public int getLastQuad() {
+        return mQuads[mQuadLen-1];
+    }
+
+    public int getQuad(int index)
+    {
+        return (index < mQuadLen) ? mQuads[index] : 0;
+    }
+
+    public int sizeInQuads() { return mQuadLen; }
+
+}
diff --git a/src/java/org/codehaus/jackson/util/BufferRecycler.java b/src/java/org/codehaus/jackson/util/BufferRecycler.java
new file mode 100644
index 0000000..f1c26bf
--- /dev/null
+++ b/src/java/org/codehaus/jackson/util/BufferRecycler.java
@@ -0,0 +1,94 @@
+package org.codehaus.jackson.util;
+
+/**
+ * This is a small utility class, whose main functionality is to allow
+ * simple reuse of raw byte/char buffers. It is usually used through
+ * <code>ThreadLocal</code> member of the owning class pointing to
+ * instance of this class through a <code>SoftReference</code>. The
+ * end result is a low-overhead GC-cleanable recycling: hopefully
+ * ideal for use by stream readers.
+ */
+public final class BufferRecycler
+{
+    public enum ByteBufferType {
+        READ_IO_BUFFER(4000)
+            ,WRITE_IO_BUFFER(4000);
+            
+        private final int size;
+
+        ByteBufferType(int size) { this.size = size; }
+    }
+
+    public enum CharBufferType {
+        TOKEN_BUFFER(2000) // Tokenizable input
+            ,CONCAT_BUFFER(2000) // concatenated output
+            ,TEXT_BUFFER(200) // Text content from input
+            ,BOOTSTRAP_BUFFER(200) // Temporary buffer for merged reader
+            ;
+        
+        private final int size;
+
+        CharBufferType(int size) { this.size = size; }
+    }
+
+    final protected byte[][] mByteBuffers = new byte[ByteBufferType.values().length][];
+    final protected char[][] mCharBuffers = new char[CharBufferType.values().length][];
+
+    public BufferRecycler() { }
+
+    public byte[] allocByteBuffer(ByteBufferType type)
+    {
+        int ix = type.ordinal();
+        byte[] buffer = mByteBuffers[ix];
+        if (buffer == null) {
+            buffer = balloc(type.size);
+        } else {
+            mByteBuffers[ix] = null;
+        }
+        return buffer;
+    }
+
+    public void releaseByteBuffer(ByteBufferType type, byte[] buffer)
+    {
+        mByteBuffers[type.ordinal()] = buffer;
+    }
+
+    public char[] allocCharBuffer(CharBufferType type)
+    {
+        return allocCharBuffer(type, 0);
+    }
+
+    public char[] allocCharBuffer(CharBufferType type, int minSize)
+    {
+        minSize = (minSize >= type.size) ? minSize : type.size;
+        int ix = type.ordinal();
+        char[] buffer = mCharBuffers[ix];
+        if (buffer == null || buffer.length < minSize) {
+            buffer = calloc(minSize);
+        } else {
+            mCharBuffers[ix] = null;
+        }
+        return buffer;
+    }
+
+    public void releaseCharBuffer(CharBufferType type, char[] buffer)
+    {
+        mCharBuffers[type.ordinal()] = buffer;
+    }
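+
+    /* Usage sketch (illustration only) of the recycling pattern described
+     * in the class javadoc: the owning class keeps a per-thread
+     * SoftReference to a recycler, and allocates/releases buffers around
+     * each use. The BUFFER_REF field name here is hypothetical.
+     *
+     *   static final ThreadLocal<SoftReference<BufferRecycler>> BUFFER_REF =
+     *       new ThreadLocal<SoftReference<BufferRecycler>>();
+     *
+     *   SoftReference<BufferRecycler> ref = BUFFER_REF.get();
+     *   BufferRecycler recycler = (ref == null) ? null : ref.get();
+     *   if (recycler == null) { // none yet, or already GC'ed
+     *       recycler = new BufferRecycler();
+     *       BUFFER_REF.set(new SoftReference<BufferRecycler>(recycler));
+     *   }
+     *   char[] buf = recycler.allocCharBuffer(BufferRecycler.CharBufferType.TEXT_BUFFER);
+     *   // ... use buf ...
+     *   recycler.releaseCharBuffer(BufferRecycler.CharBufferType.TEXT_BUFFER, buf);
+     */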
+
+    /*
+    //////////////////////////////////////////////////////////////
+    // Actual allocations separated for easier debugging/profiling
+    //////////////////////////////////////////////////////////////
+     */
+
+    private byte[] balloc(int size)
+    {
+        return new byte[size];
+    }
+
+    private char[] calloc(int size)
+    {
+        return new char[size];
+    }
+}
diff --git a/src/java/org/codehaus/jackson/util/CharTypes.java b/src/java/org/codehaus/jackson/util/CharTypes.java
new file mode 100644
index 0000000..2b917be
--- /dev/null
+++ b/src/java/org/codehaus/jackson/util/CharTypes.java
@@ -0,0 +1,105 @@
+package org.codehaus.jackson.util;
+
+import java.util.Arrays;
+
+public final class CharTypes
+{
+    final static char[] HEX_CHARS = "0123456789ABCDEF".toCharArray();
+
+    private CharTypes() { }
+
+    /**
+     * Lookup table used for determining which input characters
+     * need special handling when contained in text segment.
+     */
+    final static int[] sInputCodes;
+    static {
+        // 96 entries cover all chars needing checks (backslash is ascii 92)
+        int[] table = new int[96];
+        // Control chars and non-space white space are not allowed unquoted
+        for (int i = 0; i < 32; ++i) {
+            table[i] = 1;
+        }
+        // And then string end and quote markers are special too
+        table['"'] = 1;
+        table['\\'] = 1;
+        sInputCodes = table;
+    }
+
+    /**
+     * Lookup table used for determining which output characters
+     * need to be quoted.
+     */
+    final static int[] sOutputEscapes;
+    static {
+        int[] table = new int[96];
+        // Control chars need generic escape sequence
+        for (int i = 0; i < 32; ++i) {
+            table[i] = -(i + 1);
+        }
+        /* Others (and some within that range too) have explicit shorter
+         * sequences
+         */
+        table['"'] = '"';
+        table['\\'] = '\\';
+        // Escaping of slash is optional, so let's not add it
+        table[0x08] = 'b';
+        table[0x09] = 't';
+        table[0x0C] = 'f';
+        table[0x0A] = 'n';
+        table[0x0D] = 'r';
+        sOutputEscapes = table;
+    }
+
+    /**
+     * Lookup table for the first 128 Unicode characters (7-bit ascii)
+     * range. For actual hex digits, contains corresponding value;
+     * for others -1.
+     */
+    final static int[] sHexValues = new int[128];
+    static {
+        Arrays.fill(sHexValues, -1);
+        for (int i = 0; i < 10; ++i) {
+            sHexValues['0' + i] = i;
+        }
+        for (int i = 0; i < 6; ++i) {
+            sHexValues['a' + i] = 10 + i;
+            sHexValues['A' + i] = 10 + i;
+        }
+    }
+
+    public static int[] getInputCode() { return sInputCodes; }
+    public static int[] getOutputEscapes() { return sOutputEscapes; }
+
+    public static int charToHex(int ch)
+    {
+        return (ch > 127) ? -1 : sHexValues[ch];
+    }
+
+    public static void appendQuoted(StringBuilder sb, String content)
+    {
+        final int[] escCodes = sOutputEscapes;
+        int escLen = escCodes.length;
+        for (int i = 0, len = content.length(); i < len; ++i) {
+            char c = content.charAt(i);
+            if (c >= escLen || escCodes[c] == 0) {
+                sb.append(c);
+                continue;
+            }
+            sb.append('\\');
+            int escCode = escCodes[c];
+            if (escCode < 0) { // generic quoting (hex value)
+                // We know that it has to fit in just 2 hex chars
+                sb.append('u');
+                sb.append('0');
+                sb.append('0');
+                int value = -(escCode + 1);
+                sb.append(HEX_CHARS[value >> 4]);
+                sb.append(HEX_CHARS[value & 0xF]);
+            } else { // "named", i.e. prepend with slash
+                sb.append((char) escCode);
+            }
+        }
+    }
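+
+    /* Example (illustration only) of the escaping rules encoded in
+     * sOutputEscapes, as applied by appendQuoted():
+     *
+     *   StringBuilder sb = new StringBuilder();
+     *   CharTypes.appendQuoted(sb, "say \"hi\"");
+     *   // sb now contains: say \"hi\"   (double-quotes escaped)
+     *
+     * A linefeed would become the short form backslash-n, while a control
+     * character without a short form (say ascii 0x01) would be written in
+     * the six-character generic form: backslash, 'u', '0', '0' and two
+     * hex digits.
+     */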
+}
+
diff --git a/src/java/org/codehaus/jackson/util/SymbolTable.java b/src/java/org/codehaus/jackson/util/SymbolTable.java
new file mode 100644
index 0000000..f19194f
--- /dev/null
+++ b/src/java/org/codehaus/jackson/util/SymbolTable.java
@@ -0,0 +1,624 @@
+package org.codehaus.jackson.util;
+
+/**
+ * This class is a kind of specialized type-safe Map, from char array to
+ * String value. Specialization here means type-safety, specific access
+ * patterns (char array keys; values are optionally interned Strings,
+ * added on access if necessary), and the fact that instances are meant
+ * to be usable concurrently, but only via well-defined mechanisms for
+ * obtaining such concurrently usable instances. The main use for the
+ * class is to store symbol table information for things like compilers
+ * and parsers, especially when the number of symbols (keywords) is
+ * limited.
+ *<p>
+ * For optimal performance, usage pattern should be one where matches
+ * should be very common (esp. after "warm-up"), and as with most hash-based
+ * maps/sets, that hash codes are uniformly distributed. Also, collisions
+ * are slightly more expensive than with HashMap or HashSet, since hash codes
+ * are not used in resolving collisions; that is, equals() comparison is
+ * done with all symbols in same bucket index.<br />
+ * Finally, rehashing is also more expensive, as hash codes are not
+ * stored; rehashing requires all entries' hash codes to be recalculated.
+ * Reason for not storing hash codes is reduced memory usage, hoping
+ * for better memory locality.
+ *<p>
+ * Usual usage pattern is to create a single "master" instance, and either
+ * use that instance in sequential fashion, or to create derived "child"
+ * instances, which after use, are asked to return possible symbol additions
+ * to master instance. In either case benefit is that symbol table gets
+ * initialized so that further uses are more efficient, as eventually all
+ * symbols needed will already be in symbol table. At that point no more
+ * Symbol String allocations are needed, nor changes to symbol table itself.
+ *<p>
+ * Note that while individual SymbolTable instances are NOT thread-safe
+ * (much like generic collection classes), concurrently used "child"
+ * instances can be freely used without synchronization. However, using
+ * master table concurrently with child instances can only be done if
+ * access to master instance is read-only (ie. no modifications done).
+ */
+
+public final class SymbolTable
+{
+    /**
+     * Default initial table size. Shouldn't be miniscule (as there's
+     * cost to both array realloc and rehashing), but let's keep
+     * it reasonably small nonetheless. For systems that properly 
+     * reuse factories it doesn't matter either way; but when
+     * recreating factories often, initial overhead may dominate.
+     */
+    protected static final int DEFAULT_TABLE_SIZE = 64;
+
+    /**
+     * Config setting that determines whether Strings to be added need to be
+     * interned before being added or not. Forcing intern()ing will add
+     * some overhead when adding new Strings, but may be beneficial if such
+     * Strings are generally used by other parts of system. Note that even
+     * without interning, all returned String instances are guaranteed
+     * to be comparable with equality (==) operator; it's just that such
+     * guarantees are not made for Strings other classes return.
+     */
+    protected static final boolean INTERN_STRINGS = true;
+
+    /**
+     * Let's limit max size to 3/4 of 8k; this corresponds
+     * to 32k main hash index. This should allow for enough distinct
+     * names for almost any case.
+     */
+    final static int MAX_SYMBOL_TABLE_SIZE = 6000;
+
+    final static SymbolTable sBootstrapSymbolTable;
+    static {
+        sBootstrapSymbolTable = new SymbolTable(DEFAULT_TABLE_SIZE);
+    }
+
+    /*
+    ////////////////////////////////////////
+    // Configuration:
+    ////////////////////////////////////////
+     */
+
+    /**
+     * Sharing of learnt symbols is done by optional linking of symbol
+     * table instances with their parents. When parent linkage is
+     * defined, and child instance is released (call to <code>release</code>),
+     * parent's shared tables may be updated from the child instance.
+     */
+    protected SymbolTable mParent;
+
+    /*
+    ////////////////////////////////////////
+    // Actual symbol table data:
+    ////////////////////////////////////////
+     */
+
+    /**
+     * Primary matching symbols; it's expected that most matches occur
+     * here.
+     */
+    protected String[] mSymbols;
+
+    /**
+     * Overflow buckets; if primary doesn't match, lookup is done
+     * from here.
+     *<p>
+     * Note: Number of buckets is half of number of symbol entries, on
+     * assumption there's less need for buckets.
+     */
+    protected Bucket[] mBuckets;
+
+    /**
+     * Current size (number of entries); needed to know if and when to
+     * rehash.
+     */
+    protected int mSize;
+
+    /**
+     * Limit that indicates maximum size this instance can hold before
+     * it needs to be expanded and rehashed. Calculated using fill
+     * factor passed in to constructor.
+     */
+    protected int mSizeThreshold;
+
+    /**
+     * Mask used to get index from hash values; equal to
+     * <code>mSymbols.length - 1</code>, since mSymbols.length is
+     * always a power of two.
+     */
+    protected int mIndexMask;
+
+    /*
+    ////////////////////////////////////////
+    // Information about concurrency
+    ////////////////////////////////////////
+     */
+
+    /**
+     * Flag that indicates if any changes have been made to the data;
+     * used to both determine if bucket array needs to be copied when
+     * (first) change is made, and potentially if updated bucket list
+     * is to be resync'ed back to master instance.
+     */
+    protected boolean mDirty;
+
+    /*
+    ////////////////////////////////////////
+    // Life-cycle:
+    ////////////////////////////////////////
+     */
+
+    public static SymbolTable createRoot()
+    {
+        return sBootstrapSymbolTable.makeOrphan();
+    }
+
+    /**
+     * Method for constructing a master symbol table instance.
+     */
+    public SymbolTable() {
+        this(DEFAULT_TABLE_SIZE);
+    }
+
+    /**
+     * Main method for constructing a master symbol table instance; will
+     * be called by other public constructors.
+     *
+     * @param initialSize Minimum initial size for bucket array; internally
+     *   will always use a power of two equal to or bigger than this value.
+     */
+    public SymbolTable(int initialSize)
+    {
+        // And we'll also set flags so no copying of buckets is needed:
+        mDirty = true;
+
+        // No point in requesting funny initial sizes...
+        if (initialSize < 1) {
+            throw new IllegalArgumentException("Can not use negative/zero initial size: "+initialSize);
+        }
+        /* Initial size has to be a power of two. And it shouldn't
+         * be ridiculously small either
+         */
+        {
+            int currSize = 4;
+            while (currSize < initialSize) {
+                currSize += currSize;
+            }
+            initialSize = currSize;
+        }
+
+        initTables(initialSize);
+    }
+
+    private void initTables(int initialSize)
+    {
+        mSymbols = new String[initialSize];
+        mBuckets = new Bucket[initialSize >> 1];
+        // Mask is easy to calc for powers of two.
+        mIndexMask = initialSize - 1;
+        mSize = 0;
+        // Hard-coded fill factor is 75%
+        mSizeThreshold = (initialSize - (initialSize >> 2));
+    }
+
+    /**
+     * Internal constructor used when creating child instances.
+     */
+    private SymbolTable(SymbolTable parent,
+                        String[] symbols, Bucket[] buckets, int size)
+    {
+        mParent = parent;
+
+        mSymbols = symbols;
+        mBuckets = buckets;
+        mSize = size;
+        // Hard-coded fill factor, 75%
+        int arrayLen = (symbols.length);
+        mSizeThreshold = arrayLen - (arrayLen >> 2);
+        mIndexMask =  (arrayLen - 1);
+
+        // Need to make copies of arrays, if/when adding new entries
+        mDirty = false;
+    }
+
+    /**
+     * "Factory" method; will create a new child instance of this symbol
+     * table. It will be a copy-on-write instance, ie. it will only use
+     * read-only copy of parent's data, but when changes are needed, a
+     * copy will be created.
+     *<p>
+     * Note: while this method is synchronized, it is generally not
+     * safe to both use makeChild/mergeChild, AND to use instance
+     * actively. Instead, a separate 'root' instance should be used
+     * on which only makeChild/mergeChild are called, but instance itself
+     * is not used as a symbol table.
+     */
+    public synchronized SymbolTable makeChild()
+    {
+        return new SymbolTable(this, mSymbols, mBuckets, mSize);
+    }
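+
+    /* Usage sketch (illustration only) of the master/child pattern the
+     * class javadoc describes: a long-lived root table hands out
+     * copy-on-write children, and each child is released after use so
+     * that newly seen symbols get merged back. The buf/start/len values
+     * refer to a caller-managed char buffer.
+     *
+     *   SymbolTable root = SymbolTable.createRoot(); // kept by the owner
+     *
+     *   SymbolTable child = root.makeChild();        // per-parser instance
+     *   String sym = child.findSymbol(buf, start, len,
+     *                                 SymbolTable.calcHash(buf, start, len));
+     *   // ... parse more content ...
+     *   child.release();                             // merge additions back
+     */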
+
+    private SymbolTable makeOrphan()
+    {
+        return new SymbolTable(null, mSymbols, mBuckets, mSize);
+    }
+
+    /**
+     * Method that allows contents of child table to potentially be
+     * "merged in" with contents of this symbol table.
+     *<p>
+     * Note that caller has to make sure symbol table passed in is
+     * really a child or sibling of this symbol table.
+     *
+     * @return True, if merge was done; false if not
+     */
+    private synchronized boolean mergeChild(SymbolTable child)
+    {
+        /* One caveat: let's try to avoid problems with
+         * degenerate cases of documents with generated "random"
+         * names: for these, symbol tables would bloat indefinitely.
+         * One way to do this is to just purge tables if they grow
+         * too large, and that's what we'll do here.
+         */
+        if (child.size() > MAX_SYMBOL_TABLE_SIZE) {
+            /* Should there be a way to get notified about this
+             * event, to log it or such? (as it's somewhat abnormal
+             * thing to happen)
+             */
+            // At any rate, need to clean up the tables, then:
+            initTables(DEFAULT_TABLE_SIZE);
+        } else {
+            /* Otherwise, we'll merge changed stuff in, if there are
+             * more entries (which may not be the case if one of siblings
+             * has added symbols first or such)
+             */
+            if (child.size() <= size()) { // nothing to add
+                return false;
+            }
+            // Okie dokie, let's get the data in!
+            mSymbols = child.mSymbols;
+            mBuckets = child.mBuckets;
+            mSize = child.mSize;
+            mSizeThreshold = child.mSizeThreshold;
+            mIndexMask = child.mIndexMask;
+        }
+        /* Dirty flag... well, let's just clear it, to force copying just
+         * in case. Shouldn't really matter, for master tables.
+         * (which this is, given something is merged to it etc)
+         */
+        mDirty = false;
+        return true;
+    }
+
+    public void release()
+    {
+        // If nothing has been added, nothing to do
+        if (!maybeDirty()) {
+            return;
+        }
+        if (mParent != null) {
+            mParent.mergeChild(this);
+            /* Let's also clear the dirty flag, so that just in
+             * case release was too early, there's no corruption
+             * of possibly shared data.
+             */
+            mDirty = false;
+        }
+    }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, generic accessors:
+    ////////////////////////////////////////////////////
+     */
+
+    public int size() { return mSize; }
+
+    public boolean maybeDirty() { return mDirty; }
+
+    /*
+    ////////////////////////////////////////////////////
+    // Public API, accessing symbols:
+    ////////////////////////////////////////////////////
+     */
+
+    public String findSymbol(char[] buffer, int start, int len, int hash)
+    {
+        if (len < 1) { // empty Strings are simplest to handle up front
+            return "";
+        }
+
+        hash &= mIndexMask;
+
+        String sym = mSymbols[hash];
+
+        // Optimal case; checking existing primary symbol for hash index:
+        if (sym != null) {
+            // Let's inline primary String equality checking:
+            if (sym.length() == len) {
+                int i = 0;
+                do {
+                    if (sym.charAt(i) != buffer[start+i]) {
+                        break;
+                    }
+                } while (++i < len);
+                // Optimal case; primary match found
+                if (i == len) {
+                    return sym;
+                }
+            }
+            // How about collision bucket?
+            Bucket b = mBuckets[hash >> 1];
+            if (b != null) {
+                sym = b.find(buffer, start, len);
+                if (sym != null) {
+                    return sym;
+                }
+            }
+        }
+
+        if (!mDirty) { //need to do copy-on-write?
+            copyArrays();
+            mDirty = true;
+        } else if (mSize >= mSizeThreshold) { // Need to expand?
+            rehash();
+            /* Need to recalc hash; rare occurrence (index mask has been
+             * recalculated as part of rehash)
+             */
+            hash = calcHash(buffer, start, len) & mIndexMask;
+        }
+        ++mSize;
+
+        String newSymbol = new String(buffer, start, len);
+        if (INTERN_STRINGS) {
+            newSymbol = newSymbol.intern();
+        }
+        // Ok; do we need to add primary entry, or a bucket?
+        if (mSymbols[hash] == null) {
+            mSymbols[hash] = newSymbol;
+        } else {
+            int bix = hash >> 1;
+            mBuckets[bix] = new Bucket(newSymbol, mBuckets[bix]);
+        }
+
+        return newSymbol;
+    }
+
+    /**
+     * Similar to {@link #findSymbol(char[],int,int,int)}; used to either
+     * do potentially cheap intern() (if table already has intern()ed version),
+     * or to pre-populate symbol table with known values.
+     */
+    public String findSymbol(String str)
+    {
+        int len = str.length();
+        // Sanity check:
+        if (len < 1) {
+            return "";
+        }
+
+        int index = calcHash(str) & mIndexMask;
+        String sym = mSymbols[index];
+
+        // Optimal case; checking existing primary symbol for hash index:
+        if (sym != null) {
+            // Let's inline primary String equality checking:
+            if (sym.length() == len) {
+                int i = 0;
+                for (; i < len; ++i) {
+                    if (sym.charAt(i) != str.charAt(i)) {
+                        break;
+                    }
+                }
+                // Optimal case; primary match found
+                if (i == len) {
+                    return sym;
+                }
+            }
+            // How about collision bucket?
+            Bucket b = mBuckets[index >> 1];
+            if (b != null) {
+                sym = b.find(str);
+                if (sym != null) {
+                    return sym;
+                }
+            }
+        }
+
+        // Need to expand?
+        if (mSize >= mSizeThreshold) {
+            rehash();
+            /* Need to recalc hash; rare occurrence (index mask has been
+             * recalculated as part of rehash)
+             */
+            index = calcHash(str) & mIndexMask;
+        } else if (!mDirty) {
+            // Or perhaps we need to do copy-on-write?
+            copyArrays();
+            mDirty = true;
+        }
+        ++mSize;
+
+        if (INTERN_STRINGS) {
+            str = str.intern();
+        }
+        // Ok; do we need to add primary entry, or a bucket?
+        if (mSymbols[index] == null) {
+            mSymbols[index] = str;
+        } else {
+            int bix = index >> 1;
+            mBuckets[bix] = new Bucket(str, mBuckets[bix]);
+        }
+
+        return str;
+    }
+
+    /**
+     * Implementation of a hashing method for variable length
+     * Strings. Most of the time intention is that this calculation
+     * is done by caller during parsing, not here; however, sometimes
+     * it needs to be done for parsed "String" too.
+     *
+     * @param len Length of String; has to be at least 1 (caller guarantees
+     *   this pre-condition)
+     */
+    public static int calcHash(char[] buffer, int start, int len) {
+        int hash = (int) buffer[start];
+        for (int i = 1; i < len; ++i) {
+            hash = (hash * 31) + (int) buffer[start+i];
+        }
+        return hash;
+    }
+
+    public static int calcHash(String key) {
+        int hash = (int) key.charAt(0);
+        for (int i = 1, len = key.length(); i < len; ++i) {
+            hash = (hash * 31) + (int) key.charAt(i);
+
+        }
+        return hash;
+    }
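+
+    /* For reference (illustration only), the hash above is the usual
+     * 31-based polynomial; for example, for the 3-character key "abc":
+     *
+     *   calcHash("abc") == ('a' * 31 + 'b') * 31 + 'c'
+     *                   == (97 * 31 + 98) * 31 + 99 == 96354
+     */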
+
+    /*
+    //////////////////////////////////////////////////////////
+    // Internal methods
+    //////////////////////////////////////////////////////////
+     */
+
+    /**
+     * Method called when copy-on-write is needed; generally when first
+     * change is made to a derived symbol table.
+     */
+    private void copyArrays() {
+        String[] oldSyms = mSymbols;
+        int size = oldSyms.length;
+        mSymbols = new String[size];
+        System.arraycopy(oldSyms, 0, mSymbols, 0, size);
+        Bucket[] oldBuckets = mBuckets;
+        size = oldBuckets.length;
+        mBuckets = new Bucket[size];
+        System.arraycopy(oldBuckets, 0, mBuckets, 0, size);
+    }
+
+    /**
+     * Method called when size (number of entries) of symbol table grows
+     * so big that load factor is exceeded. Since size has to remain
+     * power of two, arrays will then always be doubled. Main work
+     * is really redistributing old entries into new String/Bucket
+     * entries.
+     */
+    private void rehash()
+    {
+        int size = mSymbols.length;
+        int newSize = size + size;
+        String[] oldSyms = mSymbols;
+        Bucket[] oldBuckets = mBuckets;
+        mSymbols = new String[newSize];
+        mBuckets = new Bucket[newSize >> 1];
+        // Let's update index mask, threshold, now (needed for rehashing)
+        mIndexMask = newSize - 1;
+        mSizeThreshold += mSizeThreshold;
+        
+        int count = 0; // let's do sanity check
+
+        /* Need to do two loops, unfortunately, since spillover area is
+         * only half the size:
+         */
+        for (int i = 0; i < size; ++i) {
+            String symbol = oldSyms[i];
+            if (symbol != null) {
+                ++count;
+                int index = calcHash(symbol) & mIndexMask;
+                if (mSymbols[index] == null) {
+                    mSymbols[index] = symbol;
+                } else {
+                    int bix = index >> 1;
+                    mBuckets[bix] = new Bucket(symbol, mBuckets[bix]);
+                }
+            }
+        }
+
+        size >>= 1;
+        for (int i = 0; i < size; ++i) {
+            Bucket b = oldBuckets[i];
+            while (b != null) {
+                ++count;
+                String symbol = b.getSymbol();
+                int index = calcHash(symbol) & mIndexMask;
+                if (mSymbols[index] == null) {
+                    mSymbols[index] = symbol;
+                } else {
+                    int bix = index >> 1;
+                    mBuckets[bix] = new Bucket(symbol, mBuckets[bix]);
+                }
+                b = b.getNext();
+            }
+        }
+
+        if (count != mSize) {
+            throw new Error("Internal error on SymbolTable.rehash(): had "+mSize+" entries; now have "+count+".");
+        }
+    }
+
+    /*
+    //////////////////////////////////////////////////////////
+    // Bucket class
+    //////////////////////////////////////////////////////////
+     */
+
+    /**
+     * This class is a symbol table entry. Each entry acts as a node
+     * in a linked list.
+     */
+    static final class Bucket {
+        private final String mSymbol;
+        private final Bucket mNext;
+
+        public Bucket(String symbol, Bucket next) {
+            mSymbol = symbol;
+            mNext = next;
+        }
+
+        public String getSymbol() { return mSymbol; }
+        public Bucket getNext() { return mNext; }
+
+        public String find(char[] buf, int start, int len) {
+            String sym = mSymbol;
+            Bucket b = mNext;
+
+            while (true) { // Inlined equality comparison:
+                if (sym.length() == len) {
+                    int i = 0;
+                    do {
+                        if (sym.charAt(i) != buf[start+i]) {
+                            break;
+                        }
+                    } while (++i < len);
+                    if (i == len) {
+                        return sym;
+                    }
+                }
+                if (b == null) {
+                    break;
+                }
+                sym = b.getSymbol();
+                b = b.getNext();
+            }
+            return null;
+        }
+
+        public String find(String str) {
+            String sym = mSymbol;
+            Bucket b = mNext;
+
+            while (true) {
+                if (sym.equals(str)) {
+                    return sym;
+                }
+                if (b == null) {
+                    break;
+                }
+                sym = b.getSymbol();
+                b = b.getNext();
+            }
+            return null;
+        }
+    }
+}
diff --git a/src/java/org/codehaus/jackson/util/TextBuffer.java b/src/java/org/codehaus/jackson/util/TextBuffer.java
new file mode 100644
index 0000000..6ec3b1b
--- /dev/null
+++ b/src/java/org/codehaus/jackson/util/TextBuffer.java
@@ -0,0 +1,615 @@
+package org.codehaus.jackson.util;
+
+import java.io.*;
+import java.util.ArrayList;
+
+/**
+ * TextBuffer is a class similar to {@link StringBuffer}, with
+ * the following differences:
+ *<ul>
+ *  <li>TextBuffer uses segmented character arrays, to avoid having
+ *     to do additional array copies when the array is not big enough.
+ *     This means that the only reallocation needed is done at most once:
+ *     if and when the caller
+ *     wants to access contents as a linear array (char[], String).
+ *    </li>
+ *  <li>TextBuffer can also be initialized in "shared mode", in which
+ *     it will just act as a wrapper to a single char array managed
+ *     by another object (like the parser that owns it)
+ *    </li>
+ *  <li>TextBuffer is not synchronized.
+ *    </li>
+ * </ul>
+ */
+public final class TextBuffer
+{
+    final static char[] NO_CHARS = new char[0];
+
+    // // // Configuration:
+
+    private final BufferRecycler mAllocator;
+
+    // // // Shared read-only input buffer:
+
+    /**
+     * Shared input buffer; stored here in case some input can be returned
+     * as is, without being copied to collector's own buffers. Note that
+     * this is read-only for this object.
+     */
+    private char[] mInputBuffer;
+
+    /**
+     * Character offset of first char in input buffer; -1 to indicate
+     * that input buffer currently does not contain any useful char data
+     */
+    private int mInputStart;
+
+    private int mInputLen;
+
+    // // // Internal non-shared collector buffers:
+
+    /**
+     * List of segments prior to currently active segment.
+     */
+    private ArrayList<char[]> mSegments;
+
+
+    // // // Currently used segment; not (yet) contained in mSegments
+
+    /**
+     * Number of characters in the segments stored in {@link #mSegments}
+     */
+    private int mSegmentSize;
+
+    private char[] mCurrentSegment;
+
+    /**
+     * Number of characters in currently active (last) segment
+     */
+    private int mCurrentSize;
+
+    // // // Temporary caching for Objects to return
+
+    /**
+     * String that will be constructed when the whole contents are
+     * needed; will be temporarily stored in case asked for again.
+     */
+    private String mResultString;
+
+    private char[] mResultArray;
+
+    /*
+    //////////////////////////////////////////////
+    // Life-cycle
+    //////////////////////////////////////////////
+     */
+
+    public TextBuffer(BufferRecycler allocator)
+    {
+        mAllocator = allocator;
+    }
+
+    /**
+     * Method called to indicate that the underlying buffers should now
+     * be recycled if they haven't yet been recycled. Although caller
+     * can still use this text buffer, it is not advisable to call this
+     * method if that is likely, since next time a buffer is needed,
+     * buffers will need to be reallocated.
+     * Note: calling this method automatically also clears contents
+     * of the buffer.
+     */
+    public void releaseBuffers()
+    {
+        if (mAllocator != null && mCurrentSegment != null) {
+            // First, let's get rid of all but the largest char array
+            resetWithEmpty();
+            // And then return that array
+            char[] buf = mCurrentSegment;
+            mCurrentSegment = null;
+            mAllocator.releaseCharBuffer(BufferRecycler.CharBufferType.TEXT_BUFFER, buf);
+        }
+    }
+
+    /**
+     * Method called to clear out any content text buffer may have, and
+     * initializes buffer to use non-shared data.
+     */
+    public void resetWithEmpty()
+    {
+        mInputBuffer = null;
+        mInputStart = -1; // indicates shared buffer not used
+        mInputLen = 0;
+
+        mResultString = null;
+        mResultArray = null;
+
+        // And then reset internal input buffers, if necessary:
+        if (mSegments != null && mSegments.size() > 0) {
+            /* Let's start using _last_ segment from list; for one, it's
+             * the biggest one, and it's also most likely to be cached
+             */
+            mCurrentSegment = mSegments.get(mSegments.size() - 1);
+            mSegments.clear();
+            mSegmentSize = 0;
+        }
+        mCurrentSize = 0;
+    }
+
+    /**
+     * Method called to initialize the buffer with a shared copy of data;
+     * this means that buffer will just have pointers to actual data. It
+     * also means that if anything is to be appended to the buffer, it
+     * will first have to unshare it (make a local copy).
+     */
+    public void resetWithShared(char[] buf, int start, int len)
+    {
+        // First, let's clear intermediate values, if any:
+        mResultString = null;
+        mResultArray = null;
+
+        // Then let's mark things we need about input buffer
+        mInputBuffer = buf;
+        mInputStart = start;
+        mInputLen = len;
+
+        // And then reset internal input buffers, if necessary:
+        if (mSegments != null && mSegments.size() > 0) {
+            /* Let's start using _last_ segment from list; for one, it's
+             * the biggest one, and it's also most likely to be cached
+             */
+            mCurrentSegment = mSegments.get(mSegments.size() - 1);
+            mSegments.clear();
+            mCurrentSize = mSegmentSize = 0;
+        }
+    }
+
+    public void resetWithCopy(char[] buf, int start, int len)
+    {
+        mInputBuffer = null;
+        mInputStart = -1; // indicates shared buffer not used
+        mInputLen = 0;
+
+        mResultString = null;
+        mResultArray = null;
+
+        // And then reset internal input buffers, if necessary:
+        if (mSegments != null && mSegments.size() > 0) {
+            /* Let's start using last segment from list; for one, it's
+             * the biggest one, and it's also most likely to be cached
+             */
+            mCurrentSegment = mSegments.get(mSegments.size() - 1);
+            mSegments.clear();
+        }
+        mCurrentSize = mSegmentSize = 0;
+        append(buf, start, len);
+    }
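+
+    /* Usage sketch (illustration only) of the two initialization modes
+     * described in the class javadoc; 'recycler' is assumed to be a
+     * BufferRecycler owned by the calling parser, and inputBuf/start/len
+     * describe a slice of its input buffer.
+     *
+     *   TextBuffer tb = new TextBuffer(recycler);
+     *
+     *   // Zero-copy: just wrap the shared input buffer slice
+     *   tb.resetWithShared(inputBuf, start, len);
+     *   String text = tb.contentsAsString();
+     *
+     *   // Owned copy: contents get copied into the buffer's own segments
+     *   tb.resetWithCopy(inputBuf, start, len);
+     *
+     *   tb.releaseBuffers(); // return the largest segment to the recycler
+     */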
+
+    public void resetWithString(String str)
+    {
+        // First things first, let's reset the buffer
+
+        mInputBuffer = null;
+        mInputStart = -1; // indicates shared buffer not used
+        mInputLen = 0;
+
+        mResultString = str;
+        mResultArray = null;
+
+        int len = str.length();
+
+        if (mSegments != null && mSegments.size() > 0) {
+            mCurrentSegment = mSegments.get(mSegments.size() - 1);
+            mSegments.clear();
+        } else if (mCurrentSegment == null) {
+            mCurrentSegment = allocBuffer(len);
+        }
+
+        // Ok, but does the String fit? If not, need to realloc
+        if (mCurrentSegment.length < len) {
+            mCurrentSegment = new char[len];
+        }
+        str.getChars(0, len, mCurrentSegment, 0);
+    }
+
+    private final char[] allocBuffer(int needed)
+    {
+        return mAllocator.allocCharBuffer(BufferRecycler.CharBufferType.TEXT_BUFFER, needed);
+    }
+
+    /*
+    //////////////////////////////////////////////
+    // Accessors for implementing StAX interface:
+    //////////////////////////////////////////////
+     */
+
+    /**
+     * @return Number of characters currently stored by this collector
+     */
+    public int size() {
+        if (mInputStart >= 0) { // shared copy from input buf
+            return mInputLen;
+        }
+        // local segmented buffers
+        return mSegmentSize + mCurrentSize;
+    }
+
+    public int getTextOffset()
+    {
+        /* Only shared input buffer can have non-zero offset; buffer
+         * segments start at 0, and if we have to create a combo buffer,
+         * that too will start from beginning of the buffer
+         */
+        return (mInputStart >= 0) ? mInputStart : 0;
+    }
+
+    public char[] getTextBuffer()
+    {
+        // Are we just using shared input buffer?
+        if (mInputStart >= 0) {
+            return mInputBuffer;
+        }
+        // Nope; but does it fit in just one segment?
+        if (mSegments == null || mSegments.size() == 0) {
+            return mCurrentSegment;
+        }
+        // Nope, need to have/create a non-segmented array and return it
+        return contentsAsArray();
+    }
+
+    /*
+    //////////////////////////////////////////////
+    // Accessors:
+    //////////////////////////////////////////////
+     */
+
+    public String contentsAsString()
+    {
+        if (mResultString == null) {
+            // Has array been requested? Can make a shortcut, if so:
+            if (mResultArray != null) {
+                mResultString = new String(mResultArray);
+            } else {
+                // Do we use shared array?
+                if (mInputStart >= 0) {
+                    if (mInputLen < 1) {
+                        return (mResultString = "");
+                    }
+                    mResultString = new String(mInputBuffer, mInputStart, mInputLen);
+                } else { // nope... need to copy
+                    // But first, let's see if we have just one buffer
+                    int segLen = mSegmentSize;
+                    int currLen = mCurrentSize;
+                    
+                    if (segLen == 0) { // yup
+                        mResultString = (currLen == 0) ? "" : new String(mCurrentSegment, 0, currLen);
+                    } else { // no, need to combine
+                        StringBuilder sb = new StringBuilder(segLen + currLen);
+                        // First stored segments
+                        if (mSegments != null) {
+                            for (int i = 0, len = mSegments.size(); i < len; ++i) {
+                                char[] curr = mSegments.get(i);
+                                sb.append(curr, 0, curr.length);
+                            }
+                        }
+                        // And finally, current segment:
+                        sb.append(mCurrentSegment, 0, mCurrentSize);
+                        mResultString = sb.toString();
+                    }
+                }
+            }
+        }
+        return mResultString;
+    }
+ 
+    public char[] contentsAsArray()
+    {
+        char[] result = mResultArray;
+        if (result == null) {
+            mResultArray = result = buildResultArray();
+        }
+        return result;
+    }
+
+    public int contentsToArray(int srcStart, char[] dst, int dstStart, int len) {
+
+        // Easy to copy from shared buffer:
+        if (mInputStart >= 0) {
+
+            int amount = mInputLen - srcStart;
+            if (amount > len) {
+                amount = len;
+            } else if (amount < 0) {
+                amount = 0;
+            }
+            if (amount > 0) {
+                System.arraycopy(mInputBuffer, mInputStart+srcStart,
+                                 dst, dstStart, amount);
+            }
+            return amount;
+        }
+
+        /* Could also check if we have array, but that'd only help with
+         * braindead clients that get full array first, then segments...
+         * which hopefully aren't that common
+         */
+
+        // Copying from segmented array is bit more involved:
+        int totalAmount = 0;
+        if (mSegments != null) {
+            for (int i = 0, segc = mSegments.size(); i < segc; ++i) {
+                char[] segment = mSegments.get(i);
+                int segLen = segment.length;
+                int amount = segLen - srcStart;
+                if (amount < 1) { // nothing from this segment?
+                    srcStart -= segLen;
+                    continue;
+                }
+                if (amount >= len) { // can get rest from this segment?
+                    System.arraycopy(segment, srcStart, dst, dstStart, len);
+                    return (totalAmount + len);
+                }
+                // Can get some from this segment, offset becomes zero:
+                System.arraycopy(segment, srcStart, dst, dstStart, amount);
+                totalAmount += amount;
+                dstStart += amount;
+                len -= amount;
+                srcStart = 0;
+            }
+        }
+
+        // Need to copy anything from last segment?
+        if (len > 0) {
+            int maxAmount = mCurrentSize - srcStart;
+            if (len > maxAmount) {
+                len = maxAmount;
+            }
+            if (len > 0) { // should always be true
+                System.arraycopy(mCurrentSegment, srcStart, dst, dstStart, len);
+                totalAmount += len;
+            }
+        }
+
+        return totalAmount;
+    }
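+
+    /* Worked example of the segment walk above (illustrative): with two
+     * finished segments of lengths 4 and 6 plus 3 chars in the current
+     * segment (13 chars in all), a call with srcStart=5, len=6 skips the
+     * first segment (srcStart becomes 1), copies 5 chars from the second
+     * segment starting at its offset 1, and then copies the final char
+     * from the current segment, returning 6.
+     */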
+
+    /**
+     * Method that will stream contents of this buffer into specified
+     * Writer.
+     */
+    public int rawContentsTo(Writer w)
+        throws IOException
+    {
+        // Let's first see if we have created helper objects:
+        if (mResultArray != null) {
+            w.write(mResultArray);
+            return mResultArray.length;
+        }
+        if (mResultString != null) {
+            w.write(mResultString);
+            return mResultString.length();
+        }
+
+        // Do we use shared array?
+        if (mInputStart >= 0) {
+            if (mInputLen > 0) {
+                w.write(mInputBuffer, mInputStart, mInputLen);
+            }
+            return mInputLen;
+        }
+        // Nope, need to do full segmented output
+        int rlen = 0;
+        if (mSegments != null) {
+            for (int i = 0, len = mSegments.size(); i < len; ++i) {
+                char[] ch = mSegments.get(i);
+                w.write(ch);
+                rlen += ch.length;
+            }
+        }
+        if (mCurrentSize > 0) {
+            w.write(mCurrentSegment, 0, mCurrentSize);
+            rlen += mCurrentSize;
+        }
+        return rlen;
+    }
+
+    /*
+    //////////////////////////////////////////////
+    // Public mutators:
+    //////////////////////////////////////////////
+     */
+
+    /**
+     * Method called to make sure that buffer is not using shared input
+     * buffer; if it is, it will copy such contents to private buffer.
+     */
+    public void ensureNotShared() {
+        if (mInputStart >= 0) {
+            unshare(16);
+        }
+    }
+
+    public void append(char[] c, int start, int len)
+    {
+        // Can't append to shared buf (sanity check)
+        if (mInputStart >= 0) {
+            unshare(len);
+        }
+        mResultString = null;
+        mResultArray = null;
+
+        // Room in current segment?
+        char[] curr = mCurrentSegment;
+        int max = curr.length - mCurrentSize;
+            
+        if (max >= len) {
+            System.arraycopy(c, start, curr, mCurrentSize, len);
+            mCurrentSize += len;
+        } else {
+            // No room for all, need to copy part(s):
+            if (max > 0) {
+                System.arraycopy(c, start, curr, mCurrentSize, max);
+                start += max;
+                len -= max;
+            }
+            // And then allocate new segment; we are guaranteed to now
+            // have enough room in segment.
+            expand(len); // note: curr != mCurrentSegment after this
+            System.arraycopy(c, start, mCurrentSegment, 0, len);
+            mCurrentSize = len;
+        }
+    }
+
+    /*
+    //////////////////////////////////////////////
+    // Raw access, for high-performance use:
+    //////////////////////////////////////////////
+     */
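+
+    /* Typical calling sequence for the raw access methods below
+     * (illustrative sketch; 'moreChars' and 'nextChar' stand in for the
+     * caller's own input handling):
+     *
+     *   char[] seg = getCurrentSegment();
+     *   int ptr = getCurrentSegmentSize();
+     *   while (moreChars) {
+     *       if (ptr >= seg.length) { // segment full; switch to a bigger one
+     *           seg = finishCurrentSegment();
+     *           ptr = 0;
+     *       }
+     *       seg[ptr++] = nextChar;
+     *   }
+     *   setCurrentLength(ptr);
+     */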
+
+    public char[] getCurrentSegment()
+    {
+        /* Since the intention of the caller is to directly add stuff into
+         * buffers, we should NOT have anything in shared buffer... ie. may
+         * need to unshare contents.
+         */
+        if (mInputStart >= 0) {
+            unshare(1);
+        } else {
+            char[] curr = mCurrentSegment;
+            if (curr == null) {
+                mCurrentSegment = allocBuffer(0);
+            } else if (mCurrentSize >= curr.length) {
+                // Plus, we better have room for at least one more char
+                expand(1);
+            }
+        }
+        return mCurrentSegment;
+    }
+
+    public int getCurrentSegmentSize() {
+        return mCurrentSize;
+    }
+
+    public void setCurrentLength(int len) {
+        mCurrentSize = len;
+    }
+
+    public char[] finishCurrentSegment()
+    {
+        if (mSegments == null) {
+            mSegments = new ArrayList<char[]>();
+        }
+        mSegments.add(mCurrentSegment);
+        int oldLen = mCurrentSegment.length;
+        mSegmentSize += oldLen;
+        // Let's grow segments by 50%
+        char[] curr = new char[oldLen + (oldLen >> 1)];
+        mCurrentSize = 0;
+        mCurrentSegment = curr;
+        return curr;
+    }
+
+    /*
+    //////////////////////////////////////////////
+    // Standard methods:
+    //////////////////////////////////////////////
+     */
+
+    /**
+     * Note: currently equivalent to calling {@link #contentsAsString},
+     * so the resulting String gets cached the same way.
+     */
+    public String toString() {
+         return contentsAsString();
+    }
+
+    /*
+    //////////////////////////////////////////////
+    // Internal methods:
+    //////////////////////////////////////////////
+     */
+
+    /**
+     * Method called if/when we need to append content when we have been
+     * initialized to use shared buffer.
+     */
+    private void unshare(int needExtra)
+    {
+        int sharedLen = mInputLen;
+        mInputLen = 0;
+        char[] inputBuf = mInputBuffer;
+        mInputBuffer = null;
+        int start = mInputStart;
+        mInputStart = -1;
+
+        // Is buffer big enough, or do we need to reallocate?
+        int needed = sharedLen+needExtra;
+        if (mCurrentSegment == null || needed > mCurrentSegment.length) {
+            mCurrentSegment = allocBuffer(needed);
+        }
+        if (sharedLen > 0) {
+            System.arraycopy(inputBuf, start, mCurrentSegment, 0, sharedLen);
+        }
+        mSegmentSize = 0;
+        mCurrentSize = sharedLen;
+    }
+
+    /**
+     * Method called when current segment is full, to allocate new
+     * segment.
+     */
+    private void expand(int minNewSegmentSize)
+    {
+        // First, let's move current segment to segment list:
+        if (mSegments == null) {
+            mSegments = new ArrayList<char[]>();
+        }
+        char[] curr = mCurrentSegment;
+        mSegments.add(curr);
+        mSegmentSize += curr.length;
+        int oldLen = curr.length;
+        // Let's grow segments by 50% minimum
+        int sizeAddition = oldLen >> 1;
+        if (sizeAddition < minNewSegmentSize) {
+            sizeAddition = minNewSegmentSize;
+        }
+        curr = new char[oldLen + sizeAddition];
+        mCurrentSize = 0;
+        mCurrentSegment = curr;
+    }
+
+    private char[] buildResultArray()
+    {
+        if (mResultString != null) { // Can take a shortcut...
+            return mResultString.toCharArray();
+        }
+        char[] result;
+        
+        // Do we use shared array?
+        if (mInputStart >= 0) {
+            if (mInputLen < 1) {
+                return NO_CHARS;
+            }
+            result = new char[mInputLen];
+            System.arraycopy(mInputBuffer, mInputStart, result, 0,
+                             mInputLen);
+        } else { // nope 
+            int size = size();
+            if (size < 1) {
+                return NO_CHARS;
+            }
+            int offset = 0;
+            result = new char[size];
+            if (mSegments != null) {
+                for (int i = 0, len = mSegments.size(); i < len; ++i) {
+                    char[] curr = mSegments.get(i);
+                    int currLen = curr.length;
+                    System.arraycopy(curr, 0, result, offset, currLen);
+                    offset += currLen;
+                }
+            }
+            System.arraycopy(mCurrentSegment, 0, result, offset, mCurrentSize);
+        }
+        return result;
+    }
+}
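The append/expand/contents methods above implement a segmented text buffer: content accumulates in a list of char-array segments, each new segment roughly 50% larger than the previous one, and the pieces are only joined once, on demand. Below is a minimal self-contained sketch of that pattern; it is independent of the classes in this patch, and the names and initial segment size are illustrative only.

    import java.util.ArrayList;

    public class SegmentedBufferSketch
    {
        private final ArrayList<char[]> mSegments = new ArrayList<char[]>();
        private char[] mCurrent = new char[8]; // small initial segment, for illustration
        private int mCurrentSize;              // chars used in mCurrent
        private int mSegmentSize;              // chars stored in finished segments

        public void append(char[] src, int start, int len)
        {
            int room = mCurrent.length - mCurrentSize;
            if (room >= len) { // fits in the current segment
                System.arraycopy(src, start, mCurrent, mCurrentSize, len);
                mCurrentSize += len;
                return;
            }
            // Fill what fits, then retire the full segment and grow by ~50%
            if (room > 0) {
                System.arraycopy(src, start, mCurrent, mCurrentSize, room);
                start += room;
                len -= room;
            }
            mSegments.add(mCurrent);
            mSegmentSize += mCurrent.length;
            int newLen = Math.max(len, mCurrent.length + (mCurrent.length >> 1));
            mCurrent = new char[newLen];
            System.arraycopy(src, start, mCurrent, 0, len);
            mCurrentSize = len;
        }

        public String contents()
        {
            // Single assembly pass, sized exactly to the total length
            StringBuilder sb = new StringBuilder(mSegmentSize + mCurrentSize);
            for (char[] seg : mSegments) {
                sb.append(seg, 0, seg.length);
            }
            sb.append(mCurrent, 0, mCurrentSize);
            return sb.toString();
        }

        public static void main(String[] args)
        {
            SegmentedBufferSketch buf = new SegmentedBufferSketch();
            char[] chunk = "0123456789".toCharArray();
            for (int i = 0; i < 5; ++i) {
                buf.append(chunk, 0, chunk.length);
            }
            System.out.println(buf.contents().length()); // prints 50
        }
    }

The class in the patch layers two things on top of this basic pattern: a shared-input mode (resetWithShared/unshare), so values that fit inside the parser's input buffer are exposed without any copying until someone appends to them, and allocation and release of the primary buffer through the BufferRecycler (allocBuffer/releaseBuffers) so that buffer can be reused instead of always created fresh.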
diff --git a/src/java/test/GenerateDoubleDoc.java b/src/java/test/GenerateDoubleDoc.java
new file mode 100644
index 0000000..d301b49
--- /dev/null
+++ b/src/java/test/GenerateDoubleDoc.java
@@ -0,0 +1,64 @@
+package test;
+
+import java.io.*;
+import java.util.Random;
+
+import org.codehaus.jackson.*;
+
+public class GenerateDoubleDoc
+{
+    final static int AVG_ARRAY_LEN = 32;
+
+    private GenerateDoubleDoc() { }
+
+    private void generate(OutputStream out, int kbSize)
+        throws IOException
+    {
+        int bsize = kbSize * 1000;
+
+        // Let's first buffer in memory, to know exact length
+        ByteArrayOutputStream bos = new ByteArrayOutputStream(bsize + 500);
+        Random r = new Random(kbSize);
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(bos, JsonFactory.Encoding.UTF8);
+        gen.writeStartArray(); // outermost array
+        gen.writeStartArray(); // inner array
+
+        do {
+            // First, do we close current, start new array?
+            if (r.nextInt(AVG_ARRAY_LEN) == 3) { // to get average array length of AVG_ARRAY_LEN (32)
+                gen.writeEndArray();
+                if (r.nextBoolean()) {
+                    gen.writeRaw("\n");
+                }
+                gen.writeStartArray();
+                gen.flush();
+            }
+            // Then the number to output: a random bit pattern, rejecting
+            // NaNs and infinities since those are not legal JSON numbers
+            float f;
+
+            do {
+                f = Float.intBitsToFloat(r.nextInt());
+            } while (Float.isNaN(f) || Float.isInfinite(f));
+            gen.writeNumber(f);
+        } while (bos.size() < bsize);
+
+        gen.writeEndArray();
+        gen.writeEndArray();
+        gen.writeRaw("\n");
+        gen.close();
+
+        bos.writeTo(out);
+    }
+
+    public static void main(String[] args)
+        throws Exception
+    {
+        if (args.length != 1) {
+            System.err.println("Usage: java test.GenerateDoubleDoc <size-in-kbytes>");
+            System.exit(1);
+        }
+        new GenerateDoubleDoc().generate(System.out, Integer.parseInt(args[0]));
+        System.out.flush();
+    }
+}
+
diff --git a/src/java/test/GenerateIntDoc.java b/src/java/test/GenerateIntDoc.java
new file mode 100644
index 0000000..9d1f973
--- /dev/null
+++ b/src/java/test/GenerateIntDoc.java
@@ -0,0 +1,69 @@
+package test;
+
+import java.io.*;
+import java.util.Random;
+
+import org.codehaus.jackson.*;
+
+public class GenerateIntDoc
+{
+    final static int AVG_ARRAY_LEN = 32;
+
+    private GenerateIntDoc() { }
+
+    private void generate(OutputStream out, int kbSize)
+        throws IOException
+    {
+        int bsize = kbSize * 1000;
+
+        // Let's first buffer in memory, to know exact length
+        ByteArrayOutputStream bos = new ByteArrayOutputStream(bsize + 500);
+        Random r = new Random(kbSize);
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(bos, JsonFactory.Encoding.UTF8);
+        gen.writeStartArray(); // outermost array
+        gen.writeStartArray(); // inner array
+
+        do {
+            // First, do we close current, start new array?
+            if (r.nextInt(AVG_ARRAY_LEN) == 3) { // to get average array length of AVG_ARRAY_LEN (32)
+                gen.writeEndArray();
+                if (r.nextBoolean()) {
+                    gen.writeRaw("\n");
+                }
+                gen.writeStartArray();
+                gen.flush();
+            }
+            // Then need to calculate number to output
+            int nr = r.nextInt(32);
+            if (r.nextBoolean()) {
+                nr *= r.nextInt(256); // up to 8k
+                if (r.nextBoolean()) {
+                    nr *= r.nextInt(0x20000); // up to 1G
+                }
+            }
+            if (r.nextBoolean()) {
+                nr = -nr;
+            }
+            gen.writeNumber(nr);
+        } while (bos.size() < bsize);
+
+        gen.writeEndArray();
+        gen.writeEndArray();
+        gen.writeRaw("\n");
+        gen.close();
+
+        bos.writeTo(out);
+    }
+
+    public static void main(String[] args)
+        throws Exception
+    {
+        if (args.length != 1) {
+            System.err.println("Usage: java test.GenerateIntDoc <size-in-kbytes>");
+            System.exit(1);
+        }
+        new GenerateIntDoc().generate(System.out, Integer.parseInt(args[0]));
+        System.out.flush();
+    }
+}
+
diff --git a/src/java/test/TestIndenter.java b/src/java/test/TestIndenter.java
new file mode 100644
index 0000000..a3808da
--- /dev/null
+++ b/src/java/test/TestIndenter.java
@@ -0,0 +1,33 @@
+package test;
+
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.map.*;
+
+public class TestIndenter
+{
+    public static void main(String[] args)
+        throws IOException
+    {
+        if (args.length != 1) {
+            System.err.println("Usage: java ... TestIndenter [file]");
+            System.exit(1);
+        }
+        JsonFactory f = new JsonFactory();
+        JsonParser jp = f.createJsonParser(new File(args[0]));
+        JsonNode jn = new JsonTypeMapper().read(jp);
+
+        StringWriter sw = new StringWriter(200);
+        JsonGenerator jg = f.createJsonGenerator(sw);
+
+        jg.useDefaultPrettyPrinter();
+
+        jn.writeTo(jg);
+        jg.close();
+
+        System.out.println("DOC-><");
+        System.out.println(sw.toString());
+        System.out.println(">");
+    }
+}
diff --git a/src/java/test/TestJavaMapper.java b/src/java/test/TestJavaMapper.java
new file mode 100644
index 0000000..efbf282
--- /dev/null
+++ b/src/java/test/TestJavaMapper.java
@@ -0,0 +1,26 @@
+package test;
+
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.map.*;
+
+public class TestJavaMapper
+{
+    private TestJavaMapper() { }
+
+    public static void main(String[] args)
+        throws Exception
+    {
+        if (args.length != 1) {
+            System.err.println("Usage: java test.TestJavaMapper <file>");
+            System.exit(1);
+        }
+        FileInputStream in = new FileInputStream(new File(args[0]));
+        JsonParser jp = new JsonFactory().createJsonParser(in);
+        Object result = new JavaTypeMapper().read(jp);
+        jp.close();
+        System.out.println("Result: <"+result+">");
+    }
+}
+
diff --git a/src/maven/jackson-asl.pom b/src/maven/jackson-asl.pom
new file mode 100644
index 0000000..a5897d4
--- /dev/null
+++ b/src/maven/jackson-asl.pom
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project>
+
+ <!-- General information -->
+
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.codehaus.jackson</groupId>
+  <artifactId>jackson-asl</artifactId>
+  <name>Jackson</name>
+  <version>@VERSION@</version>
+  <description>Jackson is a high-performance JSON processor (parser, generator)
+</description>
+
+ <!-- Contact information -->
+
+  <url>http://jackson.codehaus.org</url>
+  <issueManagement>
+    <url>http://jira.codehaus.org/browse/JACKSON</url>
+  </issueManagement>
+
+ <!-- Dependency information -->
+ 
+  <dependencies>
+    <!-- no dependencies, for now -->
+  </dependencies>
+
+  <!-- Licensing (joy!) -->
+  <licenses>
+    <license>
+      <name>The Apache Software License, Version 2.0</name>
+      <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+      <distribution>repo</distribution>
+    </license>
+  </licenses>
+
+  <organization>
+    <name>Codehaus</name>
+    <url>http://www.codehaus.org/</url>
+  </organization>
+
+</project>
diff --git a/src/maven/jackson-lgpl.pom b/src/maven/jackson-lgpl.pom
new file mode 100644
index 0000000..4fe5d41
--- /dev/null
+++ b/src/maven/jackson-lgpl.pom
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project>
+
+ <!-- General information -->
+
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.codehaus.jackson</groupId>
+  <artifactId>jackson-lgpl</artifactId>
+  <name>Jackson</name>
+  <version>@VERSION@</version>
+  <description>Jackson is a high-performance JSON processor (parser, generator)
+</description>
+
+ <!-- Contact information -->
+
+  <url>http://jackson.codehaus.org</url>
+  <issueManagement>
+    <url>http://jira.codehaus.org/browse/JACKSON</url>
+  </issueManagement>
+
+ <!-- Dependency information -->
+ 
+  <dependencies>
+    <!-- no dependencies, for now -->
+  </dependencies>
+
+  <!-- Licensing (joy!) -->
+  <licenses>
+    <license>
+      <name>GNU Lesser General Public License (LGPL), Version 2.1</name>
+      <url>http://www.fsf.org/licensing/licenses/lgpl.txt</url>
+      <distribution>repo</distribution>
+    </license>
+  </licenses>
+
+  <organization>
+    <name>Codehaus</name>
+    <url>http://www.codehaus.org/</url>
+  </organization>
+</project>
diff --git a/src/perf/TestCopyPerf.java b/src/perf/TestCopyPerf.java
new file mode 100644
index 0000000..aaa38c5
--- /dev/null
+++ b/src/perf/TestCopyPerf.java
@@ -0,0 +1,67 @@
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.map.JsonTypeMapper;
+import org.codehaus.jackson.map.JsonNode;
+
+public final class TestCopyPerf
+{
+    private final static int REPS = 2500;
+
+    final JsonFactory mJsonFactory;
+
+    final JsonNode mTree;
+
+    private TestCopyPerf(File f)
+        throws Exception
+    {
+        mJsonFactory = new JsonFactory();
+        FileInputStream fis = new FileInputStream(f);
+        JsonTypeMapper mapper = new JsonTypeMapper();
+        JsonParser jp = mJsonFactory.createJsonParser(fis);
+        mTree = mapper.read(jp);
+        jp.close();
+    }
+
+    public void test()
+        throws Exception
+    {
+        ByteArrayOutputStream bos = new ByteArrayOutputStream(2000);
+        testCopy(1, bos);
+        System.out.println("Output length: "+bos.size());
+        System.out.println();
+
+        while (true) {
+            try {  Thread.sleep(100L); } catch (InterruptedException ie) { }
+
+            long curr = System.currentTimeMillis();
+            int result = testCopy(REPS, bos);
+            curr = System.currentTimeMillis() - curr;
+            System.out.println("Took "+curr+" msecs ("
+                               +(result & 0xFF)+").");
+        }
+    }
+
+    private int testCopy(int reps, ByteArrayOutputStream bos)
+        throws IOException
+    {
+        JsonGenerator jg = null;
+        while (--reps >= 0) {
+            bos.reset();
+            jg = mJsonFactory.createJsonGenerator(bos, JsonFactory.Encoding.UTF8);
+            mTree.writeTo(jg);
+            jg.close();
+        }
+        return jg.hashCode();
+    }
+
+    public static void main(String[] args)
+        throws Exception
+    {
+        if (args.length != 1) {
+            System.err.println("Usage: java ... <file>");
+            System.exit(1);
+        }
+        new TestCopyPerf(new File(args[0])).test();
+    }
+}
diff --git a/src/perf/TestJsonPerf.java b/src/perf/TestJsonPerf.java
new file mode 100644
index 0000000..84c5be9
--- /dev/null
+++ b/src/perf/TestJsonPerf.java
@@ -0,0 +1,250 @@
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.io.IOContext;
+import org.codehaus.jackson.io.UTF8Reader;
+import org.codehaus.jackson.map.JavaTypeMapper;
+import org.codehaus.jackson.map.JsonTypeMapper;
+import org.codehaus.jackson.util.BufferRecycler;
+
+// json.org's reference implementation
+import org.json.*;
+// StringTree implementation
+import org.stringtree.json.JSONReader;
+// Jsontool implementation
+import com.sdicons.json.parser.JSONParser;
+// Noggit:
+//import org.apache.noggit.JSONParser;
+
+public final class TestJsonPerf
+{
+    private final static int REPS = 2500;
+
+    private final static int TEST_PER_GC = 5;
+
+    final JsonFactory mJsonFactory;
+
+    final byte[] mData;
+
+    protected int mBatchSize;
+
+    public TestJsonPerf(File f)
+        throws Exception
+    {
+        mJsonFactory = new JsonFactory();
+        mData = readData(f);
+
+        System.out.println("Read "+mData.length+" bytes from '"+f+"'");
+        System.out.println();
+    }
+
+    public void test()
+        throws Exception
+    {
+        int i = 0;
+        int sum = 0;
+
+        while (true) {
+            try {  Thread.sleep(100L); } catch (InterruptedException ie) { }
+            // Use 7 to test all...
+            int round = (i++ % 2);
+            
+            long curr = System.currentTimeMillis();
+            String msg;
+            boolean lf = false;
+
+            switch (round) {
+            case 0:
+                msg = "Jackson, stream";
+                sum += testJacksonStream(REPS);
+                break;
+            case 1:
+                lf = true;
+                msg = "Noggit";
+                sum += testNoggit(REPS);
+                break;
+            case 2:
+                msg = "Jackson, Java types";
+                sum += testJacksonJavaTypes(REPS);
+                break;
+            case 3:
+                msg = "Jackson, JSON types";
+                sum += testJacksonJsonTypes(REPS);
+                break;
+            case 4:
+                msg = "Json.org";
+                sum += testJsonOrg(REPS);
+                break;
+            case 5:
+                msg = "JSONTools (berlios.de)";
+                sum += testJsonTools(REPS);
+                break;
+            case 6:
+                msg = "StringTree";
+                sum += testStringTree(REPS);
+                break;
+            default:
+                throw new Error("Internal error");
+            }
+
+            curr = System.currentTimeMillis() - curr;
+            if (lf) {
+                System.out.println();
+            }
+            System.out.println("Test '"+msg+"' -> "+curr+" msecs ("
+                               +(sum & 0xFF)+").");
+
+
+            if ((i % TEST_PER_GC) == 0) {
+                System.out.println("[GC]");
+                try {  Thread.sleep(100L); } catch (InterruptedException ie) { }
+                System.gc();
+                try {  Thread.sleep(100L); } catch (InterruptedException ie) { }
+            }
+        }
+    }
+
+    private final byte[] readData(File f)
+        throws IOException
+    {
+        int len = (int) f.length();
+        byte[] data = new byte[len];
+        int offset = 0;
+        FileInputStream fis = new FileInputStream(f);
+        
+        while (len > 0) { // len tracks the number of bytes still to read
+            int count = fis.read(data, offset, len);
+            offset += count;
+            len -= count;
+        }
+        fis.close();
+
+        return data;
+    }
+
+    protected int testJsonOrg(int reps)
+        throws Exception
+    {
+        Object ob = null;
+        for (int i = 0; i < reps; ++i) {
+            // Json.org's code only accepts Strings:
+            String input = new String(mData, "UTF-8");
+            JSONTokener tok = new JSONTokener(input);
+            ob = tok.nextValue();
+        }
+        return ob.hashCode();
+    }
+
+    protected int testJsonTools(int reps)
+        throws Exception
+    {
+        Object ob = null;
+        for (int i = 0; i < reps; ++i) {
+            // Json-tools accepts streams, yay!
+            JSONParser jp = new JSONParser(new ByteArrayInputStream(mData), "byte stream");
+            /* Hmmmh. Will we get just one object for the whole thing?
+             * Or a stream? Seems like just one
+             */
+            //while ((ob = jp.nextValue()) != null) { ; }
+            ob = jp.nextValue();
+        }
+        return ob.hashCode();
+    }
+
+    protected int testStringTree(int reps)
+        throws Exception
+    {
+        Object ob = null;
+        for (int i = 0; i < reps; ++i) {
+            // StringTree impl only accepts Strings:
+            String input = new String(mData, "UTF-8");
+            ob = new JSONReader().read(input);
+        }
+        return ob.hashCode();
+    }
+
+    protected int testNoggit(int reps)
+        throws Exception
+    {
+        ByteArrayInputStream bin = new ByteArrayInputStream(mData);
+
+        char[] cbuf = new char[mData.length];
+
+        IOContext ctxt = new IOContext(new BufferRecycler(), this);
+        int sum = 0;
+
+        for (int i = 0; i < reps; ++i) {
+            /* This may be unfair advantage (allocating buffer of exact
+             * size)? But let's do that for now
+             */
+            //char[] cbuf = new char[mData.length];
+            //InputStreamReader r = new InputStreamReader(bin, "UTF-8");
+            byte[] bbuf = ctxt.allocReadIOBuffer();
+            UTF8Reader r = new UTF8Reader(ctxt, bin, bbuf, 0, 0);
+
+            bin.reset();
+            org.apache.noggit.JSONParser jp = new org.apache.noggit.JSONParser(r, cbuf);
+            int type;
+            while ((type = jp.nextEvent()) != org.apache.noggit.JSONParser.EOF) {
+                if (type == org.apache.noggit.JSONParser.STRING) {
+                    sum += jp.getString().length();
+                }
+            }
+        }
+        return sum;
+    }
+
+    protected int testJacksonStream(int reps)
+        throws Exception
+    {
+        int sum = 0;
+        for (int i = 0; i < reps; ++i) {
+            JsonParser jp = mJsonFactory.createJsonParser(new ByteArrayInputStream(mData));
+            JsonToken t;
+            while ((t = jp.nextToken()) != null) {
+                // Field names are always constructed
+                if (t == JsonToken.VALUE_STRING) {
+                    sum += jp.getText().length();
+                }
+            }
+            jp.close();
+        }
+        return sum;
+    }
+
+    protected int testJacksonJavaTypes(int reps)
+        throws Exception
+    {
+        Object ob = null;
+        JavaTypeMapper mapper = new JavaTypeMapper();
+        for (int i = 0; i < reps; ++i) {
+            JsonParser jp = mJsonFactory.createJsonParser(new ByteArrayInputStream(mData));
+            ob = mapper.read(jp);
+            jp.close();
+        }
+        return ob.hashCode(); // just to get some non-optimizable number
+    }
+
+    protected int testJacksonJsonTypes(int reps)
+        throws Exception
+    {
+        Object ob = null;
+        JsonTypeMapper mapper = new JsonTypeMapper();
+        for (int i = 0; i < reps; ++i) {
+            JsonParser jp = mJsonFactory.createJsonParser(new ByteArrayInputStream(mData));
+            ob = mapper.read(jp);
+            jp.close();
+        }
+        return ob.hashCode(); // just to get some non-optimizable number
+    }
+
+    public static void main(String[] args)
+        throws Exception
+    {
+        if (args.length != 1) {
+            System.err.println("Usage: java ... <file>");
+            System.exit(1);
+        }
+        new TestJsonPerf(new File(args[0])).test();
+    }
+}
+
diff --git a/src/perf/TestReadPerf.java b/src/perf/TestReadPerf.java
new file mode 100644
index 0000000..bc4c46b
--- /dev/null
+++ b/src/perf/TestReadPerf.java
@@ -0,0 +1,89 @@
+import java.io.*;
+
+import org.codehaus.jackson.*;
+
+public final class TestReadPerf
+{
+    private final static int REPS = 2500;
+
+    private final static int TEST_PER_GC = 5;
+
+    final JsonFactory mJsonFactory;
+
+    final byte[] mData;
+
+    private TestReadPerf(File f)
+        throws Exception
+    {
+        mJsonFactory = new JsonFactory();
+        mData = readData(f);
+    }
+
+    public void test()
+        throws Exception
+    {
+        System.out.println("Input length: "+mData.length);
+        System.out.println();
+
+        int counter = 0;
+
+        while (true) {
+            try {  Thread.sleep(100L); } catch (InterruptedException ie) { }
+
+            long curr = System.currentTimeMillis();
+            int result = testRead(REPS);
+            curr = System.currentTimeMillis() - curr;
+            System.out.println("Took "+curr+" msecs ("
+                               +(result & 0xFF)+").");
+            if (++counter >= TEST_PER_GC) {
+                counter = 0;
+                try {  Thread.sleep(100L); } catch (InterruptedException ie) { }
+                System.out.println("[GC]");
+                System.gc();
+                try {  Thread.sleep(100L); } catch (InterruptedException ie) { }
+            }
+        }
+    }
+
+    private int testRead(int reps)
+        throws IOException
+    {
+        JsonParser jp = null;
+        while (--reps >= 0) {
+            jp = mJsonFactory.createJsonParser(new ByteArrayInputStream(mData));
+            while (jp.nextToken() != null) {
+                ;
+            }
+            jp.close();
+        }
+        return jp.hashCode();
+    }
+
+    private final byte[] readData(File f)
+        throws IOException
+    {
+        int len = (int) f.length();
+        byte[] data = new byte[len];
+        int offset = 0;
+        FileInputStream fis = new FileInputStream(f);
+        
+        while (len > 0) { // len tracks the number of bytes still to read
+            int count = fis.read(data, offset, len);
+            offset += count;
+            len -= count;
+        }
+        fis.close();
+
+        return data;
+    }
+
+    public static void main(String[] args)
+        throws Exception
+    {
+        if (args.length != 1) {
+            System.err.println("Usage: java ... <file>");
+            System.exit(1);
+        }
+        new TestReadPerf(new File(args[0])).test();
+    }
+}
diff --git a/src/test/main/BaseTest.java b/src/test/main/BaseTest.java
new file mode 100644
index 0000000..7c4390c
--- /dev/null
+++ b/src/test/main/BaseTest.java
@@ -0,0 +1,109 @@
+package main;
+
+import java.io.*;
+
+import junit.framework.TestCase;
+
+import org.codehaus.jackson.*;
+
+public class BaseTest
+    extends TestCase
+{
+    /*
+    ////////////////////////////////////////////////////////
+    // Some sample documents:
+    ////////////////////////////////////////////////////////
+     */
+
+    protected final static int SAMPLE_SPEC_VALUE_WIDTH = 800;
+    protected final static int SAMPLE_SPEC_VALUE_HEIGHT = 600;
+    protected final static String SAMPLE_SPEC_VALUE_TITLE = "View from 15th Floor";
+    protected final static String SAMPLE_SPEC_VALUE_TN_URL = "http://www.example.com/image/481989943";
+    protected final static int SAMPLE_SPEC_VALUE_TN_HEIGHT = 125;
+    protected final static String SAMPLE_SPEC_VALUE_TN_WIDTH = "100";
+    protected final static int SAMPLE_SPEC_VALUE_TN_ID1 = 116;
+    protected final static int SAMPLE_SPEC_VALUE_TN_ID2 = 943;
+    protected final static int SAMPLE_SPEC_VALUE_TN_ID3 = 234;
+    protected final static int SAMPLE_SPEC_VALUE_TN_ID4 = 38793;
+
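+    /**
+     * Sample document from the JSON specification (RFC 4627), assembled
+     * from the constants above so that tests can verify individual values.
+     */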
+    protected final static String SAMPLE_DOC_JSON_SPEC = 
+        "{\n"
+        +"  \"Image\" : {\n"
+        +"    \"Width\" : "+SAMPLE_SPEC_VALUE_WIDTH+",\n"
+        +"    \"Height\" : "+SAMPLE_SPEC_VALUE_HEIGHT+","
+        +"\"Title\" : \""+SAMPLE_SPEC_VALUE_TITLE+"\",\n"
+        +"    \"Thumbnail\" : {\n"
+        +"      \"Url\" : \""+SAMPLE_SPEC_VALUE_TN_URL+"\",\n"
+        +"\"Height\" : "+SAMPLE_SPEC_VALUE_TN_HEIGHT+",\n"
+        +"      \"Width\" : \""+SAMPLE_SPEC_VALUE_TN_WIDTH+"\"\n"
+        +"    },\n"
+        +"    \"IDs\" : ["+SAMPLE_SPEC_VALUE_TN_ID1+","+SAMPLE_SPEC_VALUE_TN_ID2+","+SAMPLE_SPEC_VALUE_TN_ID3+","+SAMPLE_SPEC_VALUE_TN_ID4+"]\n"
+        +"  }"
+        +"}"
+        ;
+
+    /*
+    ////////////////////////////////////////////////////////
+    // Parser/generator construction
+    ////////////////////////////////////////////////////////
+     */
+
+    protected JsonParser createParserUsingReader(String input)
+        throws IOException, JsonParseException
+    {
+        return new JsonFactory().createJsonParser(new StringReader(input));
+    }
+
+    protected JsonParser createParserUsingStream(String input, String encoding)
+        throws IOException, JsonParseException
+    {
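+        /* Note: the encoded bytes are wrapped in an InputStreamReader, so
+         * the resulting parser still reads characters from a Reader rather
+         * than decoding the raw byte stream itself.
+         */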
+        byte[] data = input.getBytes(encoding);
+        InputStreamReader is = new InputStreamReader(new ByteArrayInputStream(data), encoding);
+        return new JsonFactory().createJsonParser(is);
+    }
+
+    /*
+    ////////////////////////////////////////////////////////
+    // Additional assertion methods
+    ////////////////////////////////////////////////////////
+     */
+
+    protected void assertToken(JsonToken expToken, JsonToken actToken)
+    {
+        if (actToken != expToken) {
+            fail("Expected token "+expToken+", current token "+actToken);
+        }
+    }
+
+    protected void assertToken(JsonToken expToken, JsonParser jp)
+    {
+        assertToken(expToken, jp.getCurrentToken());
+    }
+
+    protected void verifyException(Exception e, String match)
+    {
+        String msg = e.getMessage();
+        if (msg.indexOf(match) < 0) {
+            fail("Expected an exception with sub-string \""+match+"\": got one with message \""+msg+"\"");
+        }
+    }
+
+    /**
+     * Method that gets textual contents of the current token using
+     * available methods, and ensures results are consistent, before
+     * returning them
+     */
+    protected String getAndVerifyText(JsonParser jp)
+        throws IOException, JsonParseException
+    {
+        String str = jp.getText();
+
+        // Ok, let's verify other accessors
+        int actLen = jp.getTextLength();
+        assertEquals(str.length(), actLen);
+        char[] ch = jp.getTextCharacters();
+        String str2 = new String(ch, jp.getTextOffset(), actLen);
+        assertEquals(str, str2);
+
+        return str;
+    }
+}
diff --git a/src/test/main/TestArrayParsing.java b/src/test/main/TestArrayParsing.java
new file mode 100644
index 0000000..36f6726
--- /dev/null
+++ b/src/test/main/TestArrayParsing.java
@@ -0,0 +1,73 @@
+package main;
+
+import org.codehaus.jackson.*;
+
+/**
+ * Set of additional unit tests for verifying array parsing, specifically
+ * edge cases.
+ */
+public class TestArrayParsing
+    extends BaseTest
+{
+    public void testValidEmpty()
+        throws Exception
+    {
+        final String DOC = "[   \n  ]";
+
+        JsonParser jp = createParserUsingStream(DOC, "UTF-8");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.END_ARRAY, jp.nextToken());
+        assertNull(jp.nextToken());
+        jp.close();
+    }
+
+    public void testInvalidEmptyMissingClose()
+        throws Exception
+    {
+        final String DOC = "[ ";
+
+        JsonParser jp = createParserUsingStream(DOC, "UTF-8");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+
+        try {
+            jp.nextToken();
+            fail("Expected a parsing error for missing array close marker");
+        } catch (JsonParseException jex) {
+            verifyException(jex, "expected close marker for ARRAY");
+        }
+    }
+
+    public void testInvalidMissingFieldName()
+        throws Exception
+    {
+        final String DOC = "[  : 3 ] ";
+
+        JsonParser jp = createParserUsingStream(DOC, "UTF-8");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+
+        try {
+            jp.nextToken();
+            fail("Expected a parsing error for colon within array");
+        } catch (JsonParseException jex) {
+            verifyException(jex, "Unexpected character");
+        }
+    }
+
+    public void testInvalidExtraComma()
+        throws Exception
+    {
+        final String DOC = "[ 24, ] ";
+
+        JsonParser jp = createParserUsingStream(DOC, "UTF-8");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(24, jp.getIntValue());
+
+        try {
+            jp.nextToken();
+            fail("Expected a parsing error for trailing comma in array");
+        } catch (JsonParseException jex) {
+            verifyException(jex, "expected a value");
+        }
+    }
+}
diff --git a/src/test/main/TestCharEscaping.java b/src/test/main/TestCharEscaping.java
new file mode 100644
index 0000000..95f0cab
--- /dev/null
+++ b/src/test/main/TestCharEscaping.java
@@ -0,0 +1,100 @@
+package main;
+
+import org.codehaus.jackson.*;
+
+/**
+ * Set of basic unit tests for verifying that the basic parser
+ * functionality works as expected.
+ */
+public class TestCharEscaping
+    extends BaseTest
+{
+    public void testMissingEscaping()
+        throws Exception
+    {
+        // Invalid: control chars, including lf, must be escaped
+        final String DOC = "["
+            +"\"Linefeed: \n.\""
+            +"]";
+        JsonParser jp = createParserUsingReader(DOC);
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        try {
+            // This may or may not trigger exception
+            JsonToken t = jp.nextToken();
+            assertToken(JsonToken.VALUE_STRING, t);
+            // and if not, should get it here:
+            jp.getText();
+            fail("Expected an exception for un-escaped linefeed in string value");
+        } catch (JsonParseException jex) {
+            verifyException(jex, "has to be escaped");
+        }
+    }
+
+    public void testSimpleEscaping()
+        throws Exception
+    {
+        String DOC = "["
+            +"\"LF=\\n\""
+            +"]";
+
+        JsonParser jp = createParserUsingReader(DOC);
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals("LF=\n", jp.getText());
+        jp.close();
+
+
+        /* Note: the backslash is doubled in the source, so that javac's
+         * own unicode escape handling does not inline an actual null char;
+         * the JSON parser receives the six-character escape sequence
+         */
+        DOC = "[\"NULL:\\u0000!\"]";
+
+        jp = createParserUsingReader(DOC);
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals("NULL:\0!", jp.getText());
+
+        // Then just a single char escaping
+        jp = createParserUsingReader("[\"\\u0123\"]");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals("\u0123", jp.getText());
+
+        // And then double sequence
+        jp = createParserUsingReader("[\"\\u0041\\u0043\"]");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals("AC", jp.getText());
+    }
+
+    public void testInvalid()
+        throws Exception
+    {
+        // 2-char sequences not allowed:
+        String DOC = "[\"\\u41=A\"]";
+        JsonParser jp = createParserUsingReader(DOC);
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        try {
+            jp.nextToken();
+            jp.getText();
+            fail("Expected an exception for invalid escape sequence");
+        } catch (JsonParseException jpe) {
+            verifyException(jpe, "for character escape");
+        }
+    }
+
+    /**
+     * Test to verify that the decoder only consumes four hex digits per
+     * escape, i.e. that there is no 8-digit escape form (non-BMP characters
+     * must be escaped using two 4-digit sequences); trailing digits are
+     * treated as literal characters
+     */
+    public void test8DigitSequence()
+        throws Exception
+    {
+        String DOC = "[\"\\u00411234\"]";
+        JsonParser jp = createParserUsingReader(DOC);
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals("A1234", jp.getText());
+    }
+}
+
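The preceding test notes that non-BMP characters must be escaped as two 4-digit sequences (a surrogate pair). Here is a small self-contained sketch of that form in action, built on the same factory and parser calls the tests use; the character U+1D11E is an arbitrary example, and the expected outputs are simply what 4-digit-per-escape decoding implies.

    import java.io.StringReader;

    import org.codehaus.jackson.*;

    public class NonBmpEscapeExample
    {
        public static void main(String[] args)
            throws Exception
        {
            // U+1D11E (MUSICAL SYMBOL G CLEF) escaped as a surrogate pair,
            // i.e. two 4-digit sequences, as JSON requires for non-BMP chars
            String doc = "[\"\\uD834\\uDD1E\"]";
            JsonParser jp = new JsonFactory().createJsonParser(new StringReader(doc));
            jp.nextToken(); // START_ARRAY
            jp.nextToken(); // VALUE_STRING
            String text = jp.getText();
            System.out.println(text.length());                            // 2 (one surrogate pair)
            System.out.println(Integer.toHexString(text.codePointAt(0))); // 1d11e
            jp.close();
        }
    }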
diff --git a/src/test/main/TestJsonGenerator.java b/src/test/main/TestJsonGenerator.java
new file mode 100644
index 0000000..5866005
--- /dev/null
+++ b/src/test/main/TestJsonGenerator.java
@@ -0,0 +1,325 @@
+package main;
+
+import org.codehaus.jackson.*;
+
+import java.io.*;
+
+/**
+ * Set of basic unit tests for verifying that the basic generator
+ * functionality works as expected.
+ */
+public class TestJsonGenerator
+    extends BaseTest
+{
+    // // // First, tests for primitive (non-structured) values
+
+    public void testStringWrite()
+        throws Exception
+    {
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+        String VALUE = "";
+        gen.writeString(VALUE);
+        gen.close();
+        String docStr = sw.toString();
+        JsonParser jp = createParserUsingReader(docStr);
+        JsonToken t = jp.nextToken();
+        assertNotNull("Document \""+docStr+"\" yielded no tokens", t);
+        assertEquals(JsonToken.VALUE_STRING, t);
+        assertEquals(VALUE, jp.getText());
+        assertEquals(null, jp.nextToken());
+        jp.close();
+    }
+
+    public void testIntWrite()
+        throws Exception
+    {
+        doTestIntWrite(false);
+        doTestIntWrite(true);
+    }
+
+    public void testLongWrite()
+        throws Exception
+    {
+        doTestLongWrite(false);
+        doTestLongWrite(true);
+    }
+
+    public void testBooleanWrite()
+        throws Exception
+    {
+        for (int i = 0; i < 4; ++i) {
+            boolean state = (i & 1) == 0;
+            boolean pad = (i & 2) == 0;
+            StringWriter sw = new StringWriter();
+            JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+            gen.writeBoolean(state);
+            if (pad) {
+                gen.writeRaw(" ");
+            }
+            gen.close();
+            String docStr = sw.toString();
+            JsonParser jp = createParserUsingReader(docStr);
+            JsonToken t = jp.nextToken();
+            String exp = Boolean.valueOf(state).toString();
+            if (!exp.equals(jp.getText())) {
+                fail("Expected '"+exp+"', got '"+jp.getText()+"'");
+            }
+            assertEquals(state ? JsonToken.VALUE_TRUE : JsonToken.VALUE_FALSE, t);
+            assertEquals(null, jp.nextToken());
+            jp.close();
+        }
+    }
+
+    public void testNullWrite()
+        throws Exception
+    {
+        for (int i = 0; i < 2; ++i) {
+            boolean pad = (i & 1) == 0;
+            StringWriter sw = new StringWriter();
+            JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+            gen.writeNull();
+            if (pad) {
+                gen.writeRaw(" ");
+            }
+            gen.close();
+            String docStr = sw.toString();
+            JsonParser jp = createParserUsingReader(docStr);
+            JsonToken t = jp.nextToken();
+            String exp = "null";
+            if (!exp.equals(jp.getText())) {
+                fail("Expected '"+exp+"', got '"+jp.getText()+"'");
+            }
+            assertEquals(JsonToken.VALUE_NULL, t);
+            assertEquals(null, jp.nextToken());
+            jp.close();
+        }
+    }
+
+    // // // Then tests for structured values
+
+    public void testEmptyArrayWrite()
+        throws Exception
+    {
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+        gen.writeStartArray();
+        gen.writeEndArray();
+        gen.close();
+        String docStr = sw.toString();
+        JsonParser jp = createParserUsingReader(docStr);
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+        jp.close();
+
+        // Ok, then array with nested empty array
+        sw = new StringWriter();
+        gen = new JsonFactory().createJsonGenerator(sw);
+        gen.writeStartArray();
+        gen.writeStartArray();
+        gen.writeEndArray();
+        gen.writeEndArray();
+        gen.close();
+        docStr = sw.toString();
+        jp = createParserUsingReader(docStr);
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+        assertEquals(null, jp.nextToken());
+        jp.close();
+    }
+
+    public void testInvalidArrayWrite()
+        throws Exception
+    {
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+        gen.writeStartArray();
+        // Mismatch:
+        try {
+            gen.writeEndObject();
+            fail("Expected an exception for mismatched array/object write");
+        } catch (JsonGenerationException e) {
+            verifyException(e, "Current context not an object");
+        }
+    }
+
+    public void testSimpleArrayWrite()
+        throws Exception
+    {
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+        gen.writeStartArray();
+        gen.writeNumber(13);
+        gen.writeBoolean(true);
+        gen.writeString("foobar");
+        gen.writeEndArray();
+        gen.close();
+        String docStr = sw.toString();
+        JsonParser jp = createParserUsingReader(docStr);
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+        assertEquals(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(13, jp.getIntValue());
+        assertEquals(JsonToken.VALUE_TRUE, jp.nextToken());
+        assertEquals(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals("foobar", jp.getText());
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+        assertEquals(null, jp.nextToken());
+        jp.close();
+    }
+
+    public void testEmptyObjectWrite()
+        throws Exception
+    {
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+        gen.writeStartObject();
+        gen.writeEndObject();
+        gen.close();
+        String docStr = sw.toString();
+        JsonParser jp = createParserUsingReader(docStr);
+        assertEquals(JsonToken.START_OBJECT, jp.nextToken());
+        assertEquals(JsonToken.END_OBJECT, jp.nextToken());
+        assertEquals(null, jp.nextToken());
+    }
+
+    public void testInvalidObjectWrite()
+        throws Exception
+    {
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+        gen.writeStartObject();
+        // Mismatch:
+        try {
+            gen.writeEndArray();
+            fail("Expected an exception for mismatched array/object write");
+        } catch (JsonGenerationException e) {
+            verifyException(e, "Current context not an array");
+        }
+    }
+
+    public void testSimpleObjectWrite()
+        throws Exception
+    {
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+        gen.writeStartObject();
+        gen.writeFieldName("first");
+        gen.writeNumber(-901);
+        gen.writeFieldName("sec");
+        gen.writeBoolean(false);
+        gen.writeFieldName("3rd!"); // json field names are just strings, not ids with restrictions
+        gen.writeString("yee-haw");
+        gen.writeEndObject();
+        gen.close();
+        String docStr = sw.toString();
+        JsonParser jp = createParserUsingReader(docStr);
+        assertEquals(JsonToken.START_OBJECT, jp.nextToken());
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals("first", jp.getText());
+        assertEquals(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(-901, jp.getIntValue());
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals("sec", jp.getText());
+        assertEquals(JsonToken.VALUE_FALSE, jp.nextToken());
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals("3rd!", jp.getText());
+        assertEquals(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals("yee-haw", jp.getText());
+        assertEquals(JsonToken.END_OBJECT, jp.nextToken());
+        assertEquals(null, jp.nextToken());
+        jp.close();
+    }
+
+    // // Then root-level output testing
+
+    public void testRootIntsWrite()
+        throws Exception
+    {
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+        gen.writeNumber(1);
+        gen.writeNumber(2);
+        gen.writeNumber(-13);
+        gen.close();
+
+        String docStr = sw.toString();
+
+        JsonParser jp = createParserUsingReader(docStr);
+        assertEquals(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(1, jp.getIntValue());
+        assertEquals(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(2, jp.getIntValue());
+        assertEquals(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(-13, jp.getIntValue());
+        jp.close();
+    }
+
+    /*
+    //////////////////////////////////////////////////
+    // Internal methods
+    //////////////////////////////////////////////////
+     */
+    
+    private void doTestIntWrite(boolean pad)
+        throws Exception
+    {
+        int[] VALUES = new int[] {
+            0, 1, -9, 32, -32, 57, 13240, -9999, Integer.MIN_VALUE, Integer.MAX_VALUE
+        };
+        for (int i = 0; i < VALUES.length; ++i) {
+            int VALUE = VALUES[i];
+            StringWriter sw = new StringWriter();
+            JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+            gen.writeNumber(VALUE);
+            if (pad) {
+                gen.writeRaw(" ");
+            }
+            gen.close();
+            String docStr = sw.toString();
+            JsonParser jp = createParserUsingReader(docStr);
+            JsonToken t = jp.nextToken();
+            assertNotNull("Document \""+docStr+"\" yielded no tokens", t);
+            // Numbers are always available as lexical representation too
+            String exp = ""+VALUE;
+            if (!exp.equals(jp.getText())) {
+                fail("Expected '"+exp+"', got '"+jp.getText()+"'");
+            }
+            assertEquals(JsonToken.VALUE_NUMBER_INT, t);
+            assertEquals(VALUE, jp.getIntValue());
+            assertEquals(null, jp.nextToken());
+            jp.close();
+        }
+    }
+
+    private void doTestLongWrite(boolean pad)
+        throws Exception
+    {
+        long[] VALUES = new long[] {
+            0L, 1L, -1L, -12005002294L, Long.MIN_VALUE, Long.MAX_VALUE
+        };
+        for (int i = 0; i < VALUES.length; ++i) {
+            long VALUE = VALUES[i];
+            StringWriter sw = new StringWriter();
+            JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+            gen.writeNumber(VALUE);
+            if (pad) {
+                gen.writeRaw(" ");
+            }
+            gen.close();
+            String docStr = sw.toString();
+            JsonParser jp = createParserUsingReader(docStr);
+            JsonToken t = jp.nextToken();
+            assertNotNull("Document \""+docStr+"\" yielded no tokens", t);
+            String exp = ""+VALUE;
+            if (!exp.equals(jp.getText())) {
+                fail("Expected '"+exp+"', got '"+jp.getText()+"'");
+            }
+            assertEquals(JsonToken.VALUE_NUMBER_INT, t);
+            assertEquals(VALUE, jp.getLongValue());
+            assertEquals(null, jp.nextToken());
+            jp.close();
+        }
+    }
+}
diff --git a/src/test/main/TestJsonParser.java b/src/test/main/TestJsonParser.java
new file mode 100644
index 0000000..de36df2
--- /dev/null
+++ b/src/test/main/TestJsonParser.java
@@ -0,0 +1,262 @@
+package main;
+
+import org.codehaus.jackson.*;
+
+import java.io.IOException;
+
+/**
+ * Set of basic unit tests for verifying that the basic parser
+ * functionality works as expected.
+ */
+public class TestJsonParser
+    extends BaseTest
+{
+
+    /**
+     * This basic unit test verifies that the example given in the JSON
+     * specification (RFC 4627 or later) is properly parsed at a
+     * high level, without verifying values.
+     */
+    public void testSpecExampleSkipping()
+        throws Exception
+    {
+        doTestSpec(false);
+    }
+
+    /**
+     * Unit test that verifies that the spec example JSON is completely
+     * parsed, and proper values are given for contents of all
+     * events/tokens.
+     */
+    public void testSpecExampleFully()
+        throws Exception
+    {
+        doTestSpec(true);
+    }
+
+    /**
+     * Unit test that verifies that 3 basic keywords (null, true, false)
+     * are properly parsed in various contexts.
+     */
+    public void testKeywords()
+        throws Exception
+    {
+        final String DOC = "{\n"
+            +"\"key1\" : null,\n"
+            +"\"key2\" : true,\n"
+            +"\"key3\" : false,\n"
+            +"\"key4\" : [ false, null, true ]\n"
+            +"}"
+            ;
+
+        JsonParser jp = createParserUsingStream(DOC, "UTF-8");
+        assertToken(JsonToken.START_OBJECT, jp.nextToken());
+
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken());
+        verifyFieldName(jp, "key1");
+        assertToken(JsonToken.VALUE_NULL, jp.nextToken());
+
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken());
+        verifyFieldName(jp, "key2");
+        assertToken(JsonToken.VALUE_TRUE, jp.nextToken());
+
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken());
+        verifyFieldName(jp, "key3");
+        assertToken(JsonToken.VALUE_FALSE, jp.nextToken());
+
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken());
+        verifyFieldName(jp, "key4");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_FALSE, jp.nextToken());
+        assertToken(JsonToken.VALUE_NULL, jp.nextToken());
+        assertToken(JsonToken.VALUE_TRUE, jp.nextToken());
+        assertToken(JsonToken.END_ARRAY, jp.nextToken());
+
+        assertToken(JsonToken.END_OBJECT, jp.nextToken());
+    }
+
+    public void testInvalidKeywords()
+        throws Exception
+    {
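+        // Three failure modes are covered below: a keyword that goes wrong partway
+        // through, a valid keyword followed by trailing garbage, and a token whose
+        // first character does not start any valid value at all.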
+        doTestInvalidKeyword1("nul");
+        doTestInvalidKeyword2("nulla", JsonToken.VALUE_NULL);
+        doTestInvalidKeyword1("fal");
+        doTestInvalidKeyword3("False");
+        doTestInvalidKeyword2("falsett0", JsonToken.VALUE_FALSE);
+        doTestInvalidKeyword1("tr");
+        doTestInvalidKeyword1("truE");
+        doTestInvalidKeyword2("trueenough", JsonToken.VALUE_TRUE);
+    }
+
+    /*
+    /////////////////////////////////////////////
+    // Helper methods
+    /////////////////////////////////////////////
+    */
+
+    private void doTestSpec(boolean verify)
+        throws IOException
+    {
+        // First, using a StringReader:
+        doTestSpecIndividual(null, verify);
+
+        // Then with streams using supported encodings:
+        doTestSpecIndividual("UTF-8", verify);
+        doTestSpecIndividual("UTF-16BE", verify);
+        doTestSpecIndividual("UTF-16LE", verify);
+
+        /* Hmmh. UTF-32 is harder only because JDK doesn't come with
+         * a codec for it. Can't test it yet using this method
+         */
+        //doTestSpecIndividual("UTF-32", verify);
+    }
+
+    private void doTestSpecIndividual(String enc, boolean verify)
+        throws IOException
+    {
+        String doc = SAMPLE_DOC_JSON_SPEC;
+        JsonParser jp;
+
+        if (enc == null) {
+            jp = createParserUsingReader(doc);
+        } else {
+            jp = createParserUsingStream(doc, enc);
+        }
+
+        assertToken(JsonToken.START_OBJECT, jp.nextToken()); // main object
+
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken()); // 'Image'
+        if (verify) {
+            verifyFieldName(jp, "Image");
+        }
+
+        assertToken(JsonToken.START_OBJECT, jp.nextToken()); // 'image' object
+
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken()); // 'Width'
+        if (verify) {
+            verifyFieldName(jp, "Width");
+        }
+
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        if (verify) {
+            verifyIntValue(jp, SAMPLE_SPEC_VALUE_WIDTH);
+        }
+
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken()); // 'Height'
+        if (verify) {
+            verifyFieldName(jp, "Height");
+        }
+
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        if (verify) {
+            verifyIntValue(jp, SAMPLE_SPEC_VALUE_HEIGHT);
+        }
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken()); // 'Title'
+        if (verify) {
+            verifyFieldName(jp, "Title");
+        }
+        assertToken(JsonToken.VALUE_STRING, jp.nextToken());
+        if (verify) {
+            assertEquals(SAMPLE_SPEC_VALUE_TITLE, getAndVerifyText(jp));
+        }
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken()); // 'Thumbnail'
+        if (verify) {
+            verifyFieldName(jp, "Thumbnail");
+        }
+
+        assertToken(JsonToken.START_OBJECT, jp.nextToken()); // 'thumbnail' object
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken()); // 'Url'
+        if (verify) {
+            verifyFieldName(jp, "Url");
+        }
+        assertToken(JsonToken.VALUE_STRING, jp.nextToken());
+        if (verify) {
+            assertEquals(SAMPLE_SPEC_VALUE_TN_URL, getAndVerifyText(jp));
+        }
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken()); // 'Height'
+        if (verify) {
+            verifyFieldName(jp, "Height");
+        }
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        if (verify) {
+            verifyIntValue(jp, SAMPLE_SPEC_VALUE_TN_HEIGHT);
+        }
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken()); // 'Width'
+        if (verify) {
+            verifyFieldName(jp, "Width");
+        }
+        // Width value is actually a String in the example
+        assertToken(JsonToken.VALUE_STRING, jp.nextToken());
+        if (verify) {
+            assertEquals(SAMPLE_SPEC_VALUE_TN_WIDTH, getAndVerifyText(jp));
+        }
+
+        assertToken(JsonToken.END_OBJECT, jp.nextToken()); // 'thumbnail' object
+
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken()); // 'IDs'
+        assertToken(JsonToken.START_ARRAY, jp.nextToken()); // 'ids' array
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken()); // ids[0]
+        if (verify) {
+            verifyIntValue(jp, SAMPLE_SPEC_VALUE_TN_ID1);
+        }
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken()); // ids[1]
+        if (verify) {
+            verifyIntValue(jp, SAMPLE_SPEC_VALUE_TN_ID2);
+        }
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken()); // ids[2]
+        if (verify) {
+            verifyIntValue(jp, SAMPLE_SPEC_VALUE_TN_ID3);
+        }
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken()); // ids[3]
+        if (verify) {
+            verifyIntValue(jp, SAMPLE_SPEC_VALUE_TN_ID4);
+        }
+        assertToken(JsonToken.END_ARRAY, jp.nextToken()); // 'ids' array
+
+        assertToken(JsonToken.END_OBJECT, jp.nextToken()); // 'image' object
+
+        assertToken(JsonToken.END_OBJECT, jp.nextToken()); // main object
+    }
+
+    private void verifyFieldName(JsonParser jp, String expName)
+        throws IOException
+    {
+        assertEquals(expName, jp.getText());
+        assertEquals(expName, jp.getCurrentName());
+    }
+
+    private void verifyIntValue(JsonParser jp, long expValue)
+        throws IOException
+    {
+        // First, via textual
+        assertEquals(String.valueOf(expValue), jp.getText());
+    }
+
+    private void doTestInvalidKeyword1(String value)
+        throws IOException
+    {
+        JsonParser jp = createParserUsingStream("{ \"key1\" : "+value+" }", "UTF-8");
+        assertToken(JsonToken.START_OBJECT, jp.nextToken());
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken());
+        try {
+            jp.nextToken();
+            fail("Expected an exception for malformed value keyword");
+        } catch (JsonParseException jex) {
+            verifyException(jex, "Unrecognized token");
+        }
+    }
+
+    private void doTestInvalidKeyword2(String value, JsonToken firstValue)
+        throws IOException
+    {
+        JsonParser jp = createParserUsingStream("{ \"key1\" : "+value+" }", "UTF-8");
+        assertToken(JsonToken.START_OBJECT, jp.nextToken());
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken());
+        assertToken(firstValue, jp.nextToken());
+        try {
+            jp.nextToken();
+            fail("Expected an exception for malformed value keyword");
+        } catch (JsonParseException jex) {
+            verifyException(jex, "Unexpected character");
+        }
+    }
+
+    private void doTestInvalidKeyword3(String value)
+        throws IOException
+    {
+        JsonParser jp = createParserUsingStream("{ \"key1\" : "+value+" }", "UTF-8");
+        assertToken(JsonToken.START_OBJECT, jp.nextToken());
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken());
+        try {
+            jp.nextToken();
+            fail("Expected an exception for malformed value keyword");
+        } catch (JsonParseException jex) {
+            verifyException(jex, "expected a valid value");
+        }
+    }
+}
+
diff --git a/src/test/main/TestNumberParsing.java b/src/test/main/TestNumberParsing.java
new file mode 100644
index 0000000..6d024ec
--- /dev/null
+++ b/src/test/main/TestNumberParsing.java
@@ -0,0 +1,42 @@
+package main;
+
+import org.codehaus.jackson.io.NumberInput;
+
+/**
+ * Set of basic unit tests for verifying that the low-level number
+ * handling methods work as expected.
+ */
+public class TestNumberParsing
+    extends BaseTest
+{
+    public void testIntParsing()
+        throws Exception
+    {
+        char[] testChars = "123456789".toCharArray();
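+        // NumberInput.parseInt(buffer, offset, length) parses the digits in the
+        // given sub-range of the character buffer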
+
+        assertEquals(3, NumberInput.parseInt(testChars, 2, 1));
+        assertEquals(123, NumberInput.parseInt(testChars, 0, 3));
+        assertEquals(2345, NumberInput.parseInt(testChars, 1, 4));
+        assertEquals(9, NumberInput.parseInt(testChars, 8, 1));
+        assertEquals(456789, NumberInput.parseInt(testChars, 3, 6));
+        assertEquals(23456, NumberInput.parseInt(testChars, 1, 5));
+        assertEquals(123456789, NumberInput.parseInt(testChars, 0, 9));
+
+        testChars = "32".toCharArray();
+        assertEquals(32, NumberInput.parseInt(testChars, 0, 2));
+        testChars = "189".toCharArray();
+        assertEquals(189, NumberInput.parseInt(testChars, 0, 3));
+
+        testChars = "10".toCharArray();
+        assertEquals(10, NumberInput.parseInt(testChars, 0, 2));
+        assertEquals(0, NumberInput.parseInt(testChars, 1, 1));
+    }
+
+    public void testLongParsing()
+        throws Exception
+    {
+        char[] testChars = "123456789012345678".toCharArray();
+
+        assertEquals(123456789012345678L, NumberInput.parseLong(testChars, 0, 18));
+    }
+}
diff --git a/src/test/main/TestNumberPrinting.java b/src/test/main/TestNumberPrinting.java
new file mode 100644
index 0000000..04862a6
--- /dev/null
+++ b/src/test/main/TestNumberPrinting.java
@@ -0,0 +1,102 @@
+package main;
+
+import org.codehaus.jackson.io.NumberOutput;
+
+import java.util.Random;
+
+/**
+ * Set of basic unit tests for verifying that the low-level number
+ * printing methods work as expected.
+ */
+public class TestNumberPrinting
+    extends BaseTest
+{
+    public void testIntPrinting()
+        throws Exception
+    {
+        assertIntPrint(0);
+        assertIntPrint(-3);
+        assertIntPrint(1234);
+        assertIntPrint(-1234);
+        assertIntPrint(56789);
+        assertIntPrint(-56789);
+        assertIntPrint(999999);
+        assertIntPrint(-999999);
+        assertIntPrint(1000000);
+        assertIntPrint(-1000000);
+        assertIntPrint(10000001);
+        assertIntPrint(-10000001);
+        assertIntPrint(-100000012);
+        assertIntPrint(100000012);
+        assertIntPrint(1999888777);
+        assertIntPrint(-1999888777);
+        assertIntPrint(Integer.MAX_VALUE);
+        assertIntPrint(Integer.MIN_VALUE);
+
+        Random rnd = new Random(12345L);
+        for (int i = 0; i < 251000; ++i) {
+            assertIntPrint(rnd.nextInt());
+        }
+    }
+
+    public void testLongPrinting()
+        throws Exception
+    {
+        // First, let's just cover a couple of edge cases
+        assertLongPrint(0L, 0);
+        assertLongPrint(1L, 0);
+        assertLongPrint(-1L, 0);
+        assertLongPrint(Long.MAX_VALUE, 0);
+        assertLongPrint(Long.MIN_VALUE, 0);
+        assertLongPrint(Long.MAX_VALUE-1L, 0);
+        assertLongPrint(Long.MIN_VALUE+1L, 0);
+
+        Random rnd = new Random(12345L);
+        // Bigger value space, need more iterations for long
+        for (int i = 0; i < 678000; ++i) {
+            // Mask the low int to avoid sign extension clobbering the high 32 bits
+            long l = ((long) rnd.nextInt() << 32) | (rnd.nextInt() & 0xFFFFFFFFL);
+            assertLongPrint(l, i);
+        }
+    }
+
+    /*
+    ////////////////////////////////////////////////////////
+    // Internal methods
+    ////////////////////////////////////////////////////////
+     */
+
+    private void assertIntPrint(int value)
+    {
+        String exp = ""+value;
+        String act = printToString(value);
+
+        if (!exp.equals(act)) {
+            assertEquals("Expected conversion (exp '"+exp+"', len "+exp.length()+"; act len "+act.length()+")", exp, act);
+        }
+    }
+
+    private void assertLongPrint(long value, int index)
+    {
+        String exp = ""+value;
+        String act = printToString(value);
+
+        if (!exp.equals(act)) {
+            assertEquals("Expected conversion (exp '"+exp+"', len "+exp.length()+"; act len "+act.length()+"; number index "+index+")", exp, act);
+        }
+    }
+
+    private String printToString(int value)
+    {
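+        // 12 chars is enough for any int: "-2147483648" is 11 characters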
+        char[] buffer = new char[12];
+        int offset = NumberOutput.outputInt(value, buffer, 0);
+        return new String(buffer, 0, offset);
+    }
+
+    private String printToString(long value)
+    {
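+        // 22 chars is enough for any long: "-9223372036854775808" is 20 characters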
+        char[] buffer = new char[22];
+        int offset = NumberOutput.outputLong(value, buffer, 0);
+        return new String(buffer, 0, offset);
+    }
+}
+
diff --git a/src/test/main/TestNumericValues.java b/src/test/main/TestNumericValues.java
new file mode 100644
index 0000000..529617d
--- /dev/null
+++ b/src/test/main/TestNumericValues.java
@@ -0,0 +1,124 @@
+package main;
+
+import java.math.BigDecimal;
+
+import org.codehaus.jackson.*;
+
+/**
+ * Set of basic unit tests for verifying that the basic parser
+ * functionality works as expected.
+ */
+public class TestNumericValues
+    extends BaseTest
+{
+    public void testSimpleInt()
+        throws Exception
+    {
+        int EXP_I = 1234;
+
+        JsonParser jp = createParserUsingReader("[ "+EXP_I+" ]");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(""+EXP_I, jp.getText());
+
+        assertEquals(EXP_I, jp.getIntValue());
+        assertEquals((long) EXP_I, jp.getLongValue());
+        assertEquals((double) EXP_I, jp.getDoubleValue());
+        assertEquals(BigDecimal.valueOf((long) EXP_I), jp.getDecimalValue());
+    }
+
+    public void testSimpleLong()
+        throws Exception
+    {
+        long EXP_L = 12345678907L;
+
+        JsonParser jp = createParserUsingReader("[ "+EXP_L+" ]");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(""+EXP_L, jp.getText());
+
+        assertEquals(EXP_L, jp.getLongValue());
+        // Should get an exception if trying to convert to int
+        try {
+            jp.getIntValue();
+            fail("Expected an exception for int overflow");
+        } catch (JsonParseException jpe) {
+            verifyException(jpe, "out of range");
+        }
+        assertEquals((double) EXP_L, jp.getDoubleValue());
+        assertEquals(BigDecimal.valueOf((long) EXP_L), jp.getDecimalValue());
+    }
+
+    public void testSimpleDouble()
+        throws Exception
+    {
+        /* Testing double is more difficult, given the rounding
+         * errors and such. But let's try anyway.
+         */
+        String EXP_D_STR = "1234.00";
+        double EXP_D = Double.parseDouble(EXP_D_STR);
+
+        JsonParser jp = createParserUsingReader("[ "+EXP_D_STR+" ]");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_NUMBER_FLOAT, jp.nextToken());
+        assertEquals(EXP_D_STR, jp.getText());
+        assertEquals(EXP_D, jp.getDoubleValue());
+        jp.close();
+
+        EXP_D_STR = "2.1101567E-16";
+        EXP_D = Double.parseDouble(EXP_D_STR);
+
+        jp = createParserUsingReader("[ "+EXP_D_STR+" ]");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_NUMBER_FLOAT, jp.nextToken());
+        assertEquals(EXP_D_STR, jp.getText());
+        assertEquals(EXP_D, jp.getDoubleValue());
+        jp.close();
+    }
+
+    public void testNumbers()
+        throws Exception
+    {
+        final String DOC = "[ -13, 8100200300, 13.5, 0.00010, -2.033 ]";
+        JsonParser jp = createParserUsingStream(DOC, "UTF-8");
+
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(-13, jp.getIntValue());
+        assertEquals(-13L, jp.getLongValue());
+        assertEquals(-13., jp.getDoubleValue());
+        assertEquals("-13", jp.getText());
+
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(8100200300L, jp.getLongValue());
+        // Should get exception for overflow:
+        try {
+            /*int x =*/ jp.getIntValue();
+            fail("Expected an exception for overflow");
+        } catch (Exception e) {
+            verifyException(e, "out of range");
+        }
+        assertEquals(8100200300., jp.getDoubleValue());
+        assertEquals("8100200300", jp.getText());
+
+        assertToken(JsonToken.VALUE_NUMBER_FLOAT, jp.nextToken());
+        assertEquals(13, jp.getIntValue());
+        assertEquals(13L, jp.getLongValue());
+        assertEquals(13.5, jp.getDoubleValue());
+        assertEquals("13.5", jp.getText());
+
+        assertToken(JsonToken.VALUE_NUMBER_FLOAT, jp.nextToken());
+        assertEquals(0, jp.getIntValue());
+        assertEquals(0L, jp.getLongValue());
+        assertEquals(0.00010, jp.getDoubleValue());
+        assertEquals("0.00010", jp.getText());
+
+        assertToken(JsonToken.VALUE_NUMBER_FLOAT, jp.nextToken());
+        assertEquals(-2, jp.getIntValue());
+        assertEquals(-2L, jp.getLongValue());
+        assertEquals(-2.033, jp.getDoubleValue());
+        assertEquals("-2.033", jp.getText());
+
+        assertToken(JsonToken.END_ARRAY, jp.nextToken());
+    }
+}
diff --git a/src/test/main/TestPrettyPrinter.java b/src/test/main/TestPrettyPrinter.java
new file mode 100644
index 0000000..afa6deb
--- /dev/null
+++ b/src/test/main/TestPrettyPrinter.java
@@ -0,0 +1,66 @@
+package main;
+
+import org.codehaus.jackson.*;
+
+import java.io.*;
+
+/**
+ * Set of basic unit tests for verifying that the indentation
+ * option of the generator works correctly.
+ */
+public class TestPrettyPrinter
+    extends BaseTest
+{
+    public void testSimpleDoc()
+        throws Exception
+    {
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+        gen.useDefaultPrettyPrinter();
+
+        gen.writeStartArray();
+        gen.writeNumber(3);
+        gen.writeString("abc");
+
+        gen.writeStartArray();
+        gen.writeBoolean(true);
+        gen.writeEndArray();
+
+        gen.writeStartObject();
+        gen.writeFieldName("f");
+        gen.writeNull();
+        gen.writeFieldName("f2");
+        gen.writeNull();
+        gen.writeEndObject();
+
+        gen.writeEndArray();
+        gen.close();
+
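+        // Re-parse the indented output to verify that pretty-printing does not change content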
+        String docStr = sw.toString();
+        JsonParser jp = createParserUsingReader(docStr);
+
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+
+        assertEquals(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(3, jp.getIntValue());
+        assertEquals(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals("abc", jp.getText());
+
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+        assertEquals(JsonToken.VALUE_TRUE, jp.nextToken());
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+
+        assertEquals(JsonToken.START_OBJECT, jp.nextToken());
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals("f", jp.getText());
+        assertEquals(JsonToken.VALUE_NULL, jp.nextToken());
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals("f2", jp.getText());
+        assertEquals(JsonToken.VALUE_NULL, jp.nextToken());
+        assertEquals(JsonToken.END_OBJECT, jp.nextToken());
+
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+
+        jp.close();
+    }
+}
diff --git a/src/test/main/TestScopeMatching.java b/src/test/main/TestScopeMatching.java
new file mode 100644
index 0000000..08eee57
--- /dev/null
+++ b/src/test/main/TestScopeMatching.java
@@ -0,0 +1,73 @@
+package main;
+
+import org.codehaus.jackson.*;
+
+/**
+ * Set of basic unit tests for verifying that Array/Object scopes
+ * are properly matched.
+ */
+public class TestScopeMatching
+    extends BaseTest
+{
+    public void testUnclosedArray()
+        throws Exception
+    {
+        JsonParser jp = createParserUsingReader("[ 1, 2");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+
+        try {
+            jp.nextToken();
+            fail("Expected an exception for unclosed ARRAY");
+        } catch (JsonParseException jpe) {
+            verifyException(jpe, "expected close marker for ARRAY");
+        }
+    }
+
+    public void testUnclosedObject()
+        throws Exception
+    {
+        JsonParser jp = createParserUsingReader("{ \"key\" : 3  ");
+        assertToken(JsonToken.START_OBJECT, jp.nextToken());
+        assertToken(JsonToken.FIELD_NAME, jp.nextToken());
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+
+        try {
+            jp.nextToken();
+            fail("Expected an exception for unclosed OBJECT");
+        } catch (JsonParseException jpe) {
+            verifyException(jpe, "expected close marker for OBJECT");
+        }
+    }
+
+    public void testMismatchArrayToObject()
+        throws Exception
+    {
+        JsonParser jp = createParserUsingReader("[ 1, 2 }");
+        assertToken(JsonToken.START_ARRAY, jp.nextToken());
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertToken(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+
+        try {
+            jp.nextToken();
+            fail("Expected an exception for incorrectly closed ARRAY");
+        } catch (JsonParseException jpe) {
+            verifyException(jpe, "Unexpected close marker");
+        }
+    }
+
+    public void testMismatchObjectToArray()
+        throws Exception
+    {
+        JsonParser jp = createParserUsingReader("{ ]");
+        assertToken(JsonToken.START_OBJECT, jp.nextToken());
+
+        try {
+            jp.nextToken();
+            fail("Expected an exception for incorrectly closed OBJECT");
+        } catch (JsonParseException jpe) {
+            verifyException(jpe, "Unexpected close marker");
+        }
+    }
+}
diff --git a/src/test/main/TestStringGeneration.java b/src/test/main/TestStringGeneration.java
new file mode 100644
index 0000000..c68b847
--- /dev/null
+++ b/src/test/main/TestStringGeneration.java
@@ -0,0 +1,214 @@
+package main;
+
+import java.io.*;
+
+import org.codehaus.jackson.*;
+
+import java.util.Random;
+
+/**
+ * Set of basic unit tests for verifying that the string
+ * generation, including character escaping, works as expected.
+ */
+public class TestStringGeneration
+    extends BaseTest
+{
+    final static String[] SAMPLES = new String[] {
+        "\"test\"",
+        "\n", "\\n", "\r\n", "a\\b", "tab:\nok?",
+        "a\tb\tc\n\fdef\t \tg\"\"\"h\"\\ijklmn\b",
+        "\"\"\"", "\\r)'\"",
+        "Longer text & other stuff:\twith some\r\n\r\n random linefeeds etc added in to cause some \"special\" handling \\\\ to occur...\n"
+    };
+
+    public void testBasicEscaping()
+        throws Exception
+    {
+        doTestBasicEscaping(false);
+        doTestBasicEscaping(true);
+    }
+
+    public void testLongerRandomSingleChunk()
+        throws Exception
+    {
+        /* Let's generate ~75k pseudo-random characters per round,
+         * favoring the 7-bit ASCII range
+         */
+        for (int round = 0; round < 80; ++round) {
+            String content = generateRandom(75000+round);
+            doTestLongerRandom(content, false);
+            doTestLongerRandom(content, true);
+        }
+    }
+
+    public void testLongerRandomMultiChunk()
+        throws Exception
+    {
+        /* Let's generate ~73k pseudo-random characters per round,
+         * favoring the 7-bit ASCII range
+         */
+        for (int round = 0; round < 70; ++round) {
+            String content = generateRandom(73000+round);
+            doTestLongerRandomMulti(content, false, round);
+            doTestLongerRandomMulti(content, true, round);
+        }
+    }
+
+    /*
+    ///////////////////////////////////////////////////////////////
+    // Internal methods
+    ///////////////////////////////////////////////////////////////
+     */
+
+    private String generateRandom(int len)
+    {
+        StringBuilder sb = new StringBuilder(len+1000); // pad for surrogates
+        Random r = new Random(len);
+        for (int i = 0; i < len; ++i) {
+            if (r.nextBoolean()) { // non-ascii
+                int value = r.nextInt() & 0xFFFF;
+                // Otherwise easy, except that we need to ensure that
+                // surrogates are properly paired and that their
+                // values do not exceed 0x10FFFF
+                if (value >= 0xD800 && value <= 0xDFFF) {
+                    // Let's discard first value, then, and produce valid pair
+                    int fullValue = (r.nextInt() & 0xFFFFF);
+                    sb.append((char) (0xD800 + (fullValue >> 10)));
+                    value = 0xDC00 + (fullValue & 0x3FF);
+                }
+                sb.append((char) value);
+            } else { // ascii
+                sb.append((char) (r.nextInt() & 0x7F));
+            }
+        }
+        return sb.toString();
+    }   
+
+    private void doTestBasicEscaping(boolean charArray)
+        throws Exception
+    {
+        for (int i = 0; i < SAMPLES.length; ++i) {
+            String VALUE = SAMPLES[i];
+            StringWriter sw = new StringWriter();
+            JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+            gen.writeStartArray();
+            if (charArray) {
+                char[] buf = new char[VALUE.length() + i];
+                VALUE.getChars(0, VALUE.length(), buf, i);
+                gen.writeString(buf, i, VALUE.length());
+            } else {
+                gen.writeString(VALUE);
+            }
+            gen.writeEndArray();
+            gen.close();
+            String docStr = sw.toString();
+            JsonParser jp = createParserUsingReader(docStr);
+            assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+            JsonToken t = jp.nextToken();
+            assertEquals(JsonToken.VALUE_STRING, t);
+            assertEquals(VALUE, jp.getText());
+            assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+            assertEquals(null, jp.nextToken());
+            jp.close();
+        }
+    }
+
+    private void doTestLongerRandom(String text, boolean charArray)
+        throws Exception
+    {
+        ByteArrayOutputStream bow = new ByteArrayOutputStream(text.length());
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(bow, JsonFactory.Encoding.UTF8);
+        gen.writeStartArray();
+        if (charArray) {
+            char[] buf = new char[text.length()];
+            text.getChars(0, text.length(), buf, 0);
+            gen.writeString(buf, 0, text.length());
+        } else {
+            gen.writeString(text);
+        }
+        gen.writeEndArray();
+        gen.close();
+        byte[] docData = bow.toByteArray();
+        JsonParser jp = new JsonFactory().createJsonParser(new ByteArrayInputStream(docData));
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+        JsonToken t = jp.nextToken();
+        assertEquals(JsonToken.VALUE_STRING, t);
+        String act = jp.getText();
+        if (!text.equals(act)) {
+            if (text.length() != act.length()) {
+                fail("Expected string length "+text.length()+", actual "+act.length());
+            }
+            int i = 0;
+            for (int len = text.length(); i < len; ++i) {
+                if (text.charAt(i) != act.charAt(i)) {
+                    break;
+                }
+            }
+            fail("Strings differ at position #"+i+" (len "+text.length()+"): expected char 0x"+Integer.toHexString(text.charAt(i))+", actual 0x"+Integer.toHexString(act.charAt(i)));
+        }
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+        assertEquals(null, jp.nextToken());
+        jp.close();
+    }
+
+    private void doTestLongerRandomMulti(String text, boolean charArray, int round)
+        throws Exception
+    {
+        ByteArrayOutputStream bow = new ByteArrayOutputStream(text.length());
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(bow, JsonFactory.Encoding.UTF8);
+        // Write the text as a sequence of random-sized chunks into a single array
+        gen.writeStartArray();
+
+        Random rnd = new Random(text.length());
+        int offset = 0;
+
+        while (offset < text.length()) {
+            int shift = 1 + ((rnd.nextInt() & 0xFFFFF) % 12); // 1 - 12
+            int len = (1 << shift) + shift; // up to 4k
+            if ((offset + len) >= text.length()) {
+                len = text.length() - offset;
+            } else {
+                // Need to avoid splitting surrogates, though
+                char c = text.charAt(offset+len-1);
+                if (c >= 0xD800 && c < 0xDC00) {
+                    ++len;
+                }
+            }
+            if (charArray) {
+                char[] buf = new char[len];
+                text.getChars(offset, offset+len, buf, 0);
+                gen.writeString(buf, 0, len);
+            } else {
+                gen.writeString(text.substring(offset, offset+len));
+            }
+            offset += len;
+        }
+
+        gen.writeEndArray();
+        gen.close();
+        byte[] docData = bow.toByteArray();
+        JsonParser jp = new JsonFactory().createJsonParser(new ByteArrayInputStream(docData));
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+
+        offset = 0;
+        while (jp.nextToken() == JsonToken.VALUE_STRING) {
+            // Let's verify, piece by piece
+            String act = jp.getText();
+            String exp = text.substring(offset, offset+act.length());
+            if (!act.equals(exp)) {
+                fail("String segment ["+offset+" - "+(offset+act.length())+"[ different");
+            }
+            offset += act.length();
+        }
+        assertEquals(JsonToken.END_ARRAY, jp.getCurrentToken());
+        jp.close();
+    }
+
+}
diff --git a/src/test/map/TestFromJavaType.java b/src/test/map/TestFromJavaType.java
new file mode 100644
index 0000000..ddb85b3
--- /dev/null
+++ b/src/test/map/TestFromJavaType.java
@@ -0,0 +1,102 @@
+package map;
+
+import main.BaseTest;
+
+import java.io.*;
+import java.util.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.map.*;
+
+/**
+ * This unit test suite tries to verify that the "Native" Java type
+ * mapper can properly serialize Java core objects to JSON.
+ */
+public class TestFromJavaType
+    extends BaseTest
+{
+    public void testFromArray()
+        throws Exception
+    {
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+
+        ArrayList<Object> doc = new ArrayList<Object>();
+        doc.add("Elem1");
+        doc.add(Integer.valueOf(3));
+        Map<String,Object> struct = new LinkedHashMap<String, Object>();
+        struct.put("first", Boolean.TRUE);
+        struct.put("Second", new ArrayList<Object>());
+        doc.add(struct);
+        doc.add(Boolean.FALSE);
+
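+        // writeAny() should recurse through the List/Map structure and write matching JSON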
+        new JavaTypeMapper().writeAny(gen, doc);
+        gen.close();
+
+        JsonParser jp = new JsonFactory().createJsonParser(new StringReader(sw.toString()));
+
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+
+        assertEquals(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals("Elem1", getAndVerifyText(jp));
+
+        assertEquals(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(3, jp.getIntValue());
+
+        assertEquals(JsonToken.START_OBJECT, jp.nextToken());
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals("first", getAndVerifyText(jp));
+
+        assertEquals(JsonToken.VALUE_TRUE, jp.nextToken());
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals("Second", getAndVerifyText(jp));
+
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+        assertEquals(JsonToken.END_OBJECT, jp.nextToken());
+
+        assertEquals(JsonToken.VALUE_FALSE, jp.nextToken());
+
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+        assertNull(jp.nextToken());
+    }
+
+    public void testFromMap()
+        throws Exception
+    {
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+
+        LinkedHashMap<String,Object> doc = new LinkedHashMap<String,Object>();
+
+        doc.put("a1", "\"text\"");
+        doc.put("int", Integer.valueOf(137));
+        doc.put("foo bar", Long.valueOf(1234567890L));
+
+        new JavaTypeMapper().writeAny(gen, doc);
+        gen.close();
+
+        JsonParser jp = new JsonFactory().createJsonParser(new StringReader(sw.toString()));
+
+        assertEquals(JsonToken.START_OBJECT, jp.nextToken());
+        
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals("a1", getAndVerifyText(jp));
+        assertEquals(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals("\"text\"", getAndVerifyText(jp));
+
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals("int", getAndVerifyText(jp));
+        assertEquals(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(137, jp.getIntValue());
+
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals("foo bar", getAndVerifyText(jp));
+        assertEquals(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(1234567890L, jp.getLongValue());
+
+        assertEquals(JsonToken.END_OBJECT, jp.nextToken());
+
+        assertNull(jp.nextToken());
+    }
+}
diff --git a/src/test/map/TestFromJsonType.java b/src/test/map/TestFromJsonType.java
new file mode 100644
index 0000000..096dcfb
--- /dev/null
+++ b/src/test/map/TestFromJsonType.java
@@ -0,0 +1,145 @@
+package map;
+
+import main.BaseTest;
+
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.map.*;
+
+/**
+ * This unit test suite tries to verify that JsonNodes constructed
+ * by the "JSON type" mapper can be serialized properly.
+ */
+public class TestFromJsonType
+    extends BaseTest
+{
+    final static String FIELD1 = "first";
+    final static String FIELD2 = "Second?";
+    final static String FIELD3 = "foo'n \"bar\"";
+    final static String FIELD4 = "4";
+
+    final static String TEXT1 = "Some text & \"stuff\"";
+    final static String TEXT2 = "Some more text:\twith\nlinefeeds and all!";
+
+    final static double DOUBLE_VALUE = 9.25;
+
+    public void testFromArray()
+        throws Exception
+    {
+        JsonTypeMapper mapper = new JsonTypeMapper();
+
+        JsonNode root = mapper.arrayNode();
+        root.appendElement(mapper.textNode(TEXT1));
+        root.appendElement(mapper.numberNode(3));
+        JsonNode obj = mapper.objectNode();
+        root.appendElement(obj);
+        obj.setElement(FIELD1, mapper.booleanNode(true));
+        obj.setElement(FIELD2, mapper.arrayNode());
+        root.appendElement(mapper.booleanNode(false));
+
+        /* Ok, ready... let's serialize using one of two alternate
+         * methods: first preferred (using generator)
+         */
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+        root.writeTo(gen);
+        gen.close();
+        verifyFromArray(sw.toString());
+
+        // And then convenient but less efficient alternative:
+        verifyFromArray(root.toString());
+    }
+
+    public void testFromMap()
+        throws Exception
+    {
+        JsonTypeMapper mapper = new JsonTypeMapper();
+
+        JsonNode root = mapper.objectNode();
+        root.setElement(FIELD4, mapper.textNode(TEXT2));
+        root.setElement(FIELD3, mapper.numberNode(-1));
+        root.setElement(FIELD2, mapper.arrayNode());
+        root.setElement(FIELD1, mapper.numberNode(DOUBLE_VALUE));
+
+        /* Let's serialize using one of two alternate methods:
+         * first preferred (using generator)
+         */
+        StringWriter sw = new StringWriter();
+        JsonGenerator gen = new JsonFactory().createJsonGenerator(sw);
+        root.writeTo(gen);
+        gen.close();
+        verifyFromMap(sw.toString());
+
+        // And then convenient but less efficient alternative:
+        verifyFromMap(root.toString());
+    }
+
+    /*
+    ///////////////////////////////////////////////////////////////
+    // Internal methods
+    ///////////////////////////////////////////////////////////////
+     */
+
+    private void verifyFromArray(String input)
+        throws Exception
+    {
+        JsonParser jp = new JsonFactory().createJsonParser(new StringReader(input));
+        
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+        
+        assertEquals(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals(TEXT1, getAndVerifyText(jp));
+        
+        assertEquals(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(3, jp.getIntValue());
+        
+        assertEquals(JsonToken.START_OBJECT, jp.nextToken());
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals(FIELD1, getAndVerifyText(jp));
+        
+        assertEquals(JsonToken.VALUE_TRUE, jp.nextToken());
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals(FIELD2, getAndVerifyText(jp));
+        
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+        assertEquals(JsonToken.END_OBJECT, jp.nextToken());
+        
+        assertEquals(JsonToken.VALUE_FALSE, jp.nextToken());
+        
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+        assertNull(jp.nextToken());
+    }
+
+    private void verifyFromMap(String input)
+        throws Exception
+    {
+        JsonParser jp = new JsonFactory().createJsonParser(new StringReader(input));
+        assertEquals(JsonToken.START_OBJECT, jp.nextToken());
+        
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals(FIELD4, getAndVerifyText(jp));
+        assertEquals(JsonToken.VALUE_STRING, jp.nextToken());
+        assertEquals(TEXT2, getAndVerifyText(jp));
+        
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals(FIELD3, getAndVerifyText(jp));
+        assertEquals(JsonToken.VALUE_NUMBER_INT, jp.nextToken());
+        assertEquals(-1, jp.getIntValue());
+        
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals(FIELD2, getAndVerifyText(jp));
+        assertEquals(JsonToken.START_ARRAY, jp.nextToken());
+        assertEquals(JsonToken.END_ARRAY, jp.nextToken());
+        
+        assertEquals(JsonToken.FIELD_NAME, jp.nextToken());
+        assertEquals(FIELD1, getAndVerifyText(jp));
+        assertEquals(JsonToken.VALUE_NUMBER_FLOAT, jp.nextToken());
+        assertEquals(DOUBLE_VALUE, jp.getDoubleValue());
+        
+        assertEquals(JsonToken.END_OBJECT, jp.nextToken());
+        
+        assertNull(jp.nextToken());
+    }
+}
diff --git a/src/test/map/TestToJavaType.java b/src/test/map/TestToJavaType.java
new file mode 100644
index 0000000..6180742
--- /dev/null
+++ b/src/test/map/TestToJavaType.java
@@ -0,0 +1,141 @@
+package map;
+
+import main.BaseTest;
+
+import java.io.*;
+
+import org.codehaus.jackson.*;
+import org.codehaus.jackson.map.*;
+import org.codehaus.jackson.map.impl.*;
+
+/**
+ * This unit test suite tries to verify that the "JSON" type
+ * mapper can properly parse JSON and bind contents into appropriate
+ * JsonNode instances.
+ */
+public class TestToJavaType
+    extends BaseTest
+{
+    public void testSimple()
+        throws Exception
+    {
+        final String JSON = SAMPLE_DOC_JSON_SPEC;
+
+        JsonFactory jf = new JsonFactory();
+        JsonNode result = new JsonTypeMapper().read(jf.createJsonParser(new StringReader(JSON)));
+        assertType(result, ObjectNode.class);
+        assertEquals(1, result.size());
+        assertTrue(result.isObject());
+
+        ObjectNode main = (ObjectNode) result;
+        assertEquals("Image", main.getFieldNames().next());
+        JsonNode ob = main.getFieldValues().next();
+        assertType(ob, ObjectNode.class);
+        ObjectNode imageMap = (ObjectNode) ob;
+
+        assertEquals(5, imageMap.size());
+        ob = imageMap.getFieldValue("Width");
+        assertTrue(ob.isIntegralNumber());
+        assertEquals(SAMPLE_SPEC_VALUE_WIDTH, ob.getIntValue());
+
+        ob = imageMap.getFieldValue("Height");
+        assertTrue(ob.isIntegralNumber());
+        assertEquals(SAMPLE_SPEC_VALUE_HEIGHT, ob.getIntValue());
+
+        ob = imageMap.getFieldValue("Title");
+        assertTrue(ob.isTextual());
+        assertEquals(SAMPLE_SPEC_VALUE_TITLE, ob.getTextValue());
+
+        ob = imageMap.getFieldValue("Thumbnail");
+        assertType(ob, ObjectNode.class);
+        ObjectNode tn = (ObjectNode) ob;
+        ob = tn.getFieldValue("Url");
+        assertTrue(ob.isTextual());
+        assertEquals(SAMPLE_SPEC_VALUE_TN_URL, ob.getTextValue());
+        ob = tn.getFieldValue("Height");
+        assertTrue(ob.isIntegralNumber());
+        assertEquals(SAMPLE_SPEC_VALUE_TN_HEIGHT, ob.getIntValue());
+        ob = tn.getFieldValue("Width");
+        assertTrue(ob.isTextual());
+        assertEquals(SAMPLE_SPEC_VALUE_TN_WIDTH, ob.getTextValue());
+
+        ob = imageMap.getFieldValue("IDs");
+        assertTrue(ob.isArray());
+        ArrayNode idList = (ArrayNode) ob;
+        assertEquals(4, idList.size());
+        {
+            int[] values = new int[] {
+                SAMPLE_SPEC_VALUE_TN_ID1,
+                SAMPLE_SPEC_VALUE_TN_ID2,
+                SAMPLE_SPEC_VALUE_TN_ID3,
+                SAMPLE_SPEC_VALUE_TN_ID4
+            };
+            for (int i = 0; i < values.length; ++i) {
+                assertEquals(values[i], idList.getElementValue(i).getIntValue());
+            }
+        }
+    }
+
+    /**
+     * Type mappers should be able to gracefully deal with end of
+     * input.
+     */
+    public void testEOF()
+        throws Exception
+    {
+        JsonFactory jf = new JsonFactory();
+        String JSON =
+            "{ \"key\": [ { \"a\" : { \"name\": \"foo\",  \"type\": 1\n"
+            +"},  \"type\": 3, \"url\": \"http://www.google.com\" } ],\n"
+            +"\"name\": \"xyz\", \"type\": 1, \"url\" : null }\n  "
+            ;
+        JsonParser jp = jf.createJsonParser(new StringReader(JSON));
+        JsonTypeMapper mapper = new JsonTypeMapper();
+        JsonNode result = mapper.read(jp);
+        assertTrue(result.isObject());
+        assertEquals(4, result.size());
+
+        assertNull(mapper.read(jp));
+    }
+
+    public void testMultiple()
+        throws Exception
+    {
+        JsonFactory jf = new JsonFactory();
+        String JSON = "12  \"string\" [ 1, 2, 3 ]";
+        JsonParser jp = jf.createJsonParser(new StringReader(JSON));
+        JsonTypeMapper mapper = new JsonTypeMapper();
+
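+        // Each read() call should bind exactly one root-level value; null signals end of input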
+        JsonNode result = mapper.read(jp);
+        assertTrue(result.isIntegralNumber());
+        assertEquals(12, result.getIntValue());
+
+        result = mapper.read(jp);
+        assertTrue(result.isTextual());
+        assertEquals("string", result.getTextValue());
+
+        result = mapper.read(jp);
+        assertTrue(result.isArray());
+        assertEquals(3, result.size());
+
+        assertNull(mapper.read(jp));
+    }
+
+    /*
+    //////////////////////////////////////////////
+    // Helper methods
+    //////////////////////////////////////////////
+     */
+
+    private void assertType(Object ob, Class<?> expType)
+    {
+        if (ob == null) {
+            fail("Expected an object of type "+expType.getName()+", got null");
+        }
+        Class<?> cls = ob.getClass();
+        if (!expType.isAssignableFrom(cls)) {
+            fail("Expected type "+expType.getName()+", got "+cls.getName());
+        }
+    }
+}
+