Internal change

PiperOrigin-RevId: 445190108
Change-Id: I40f8c4ff8684bcd44d4c6663a5db5373b0f3105c
diff --git a/GOIMPORT/CONFIGURATION b/GOIMPORT/CONFIGURATION
new file mode 100644
index 0000000..7352905
--- /dev/null
+++ b/GOIMPORT/CONFIGURATION
@@ -0,0 +1,30 @@
+# This file configures imports managed by //third_party/golang/import.go.
+# See go/thirdpartygo for more information.
+
+# ImportFiles specifies which files to import from the upstream source.
+[ImportFiles]
+  exclude: /METADATA$  # see go/metadata
+  exclude: /OWNERS$    # see go/owners
+  exclude: /BUILD$     # see go/build
+
+  exclude: /\.gitignore$
+  exclude: /\.travis\.yml$
+  exclude: /\.github/
+  exclude: appveyor\.yml
+  exclude: Makefile
+
+  include: .*
+
+# ImportRenames specifies renames to apply to any source files or directories.
+[ImportRenames]
+  sed: s:^/LICENSE([.](gpl|md|txt))?$:/LICENSE:I  # see go/thirdpartylicenses
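+  # For example, the rule above renames /LICENSE.txt, /LICENSE.md, or
+  # /LICENSE.gpl to /LICENSE (case-insensitively, via the trailing I flag).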
+
+# GoogleFiles specifies files added to the import to support use within google3.
+[GoogleFiles]
+  include: ^/GOIMPORT/    # configuration and metadata for import.go tool
+  include: /METADATA      # see go/metadata
+  include: /OWNERS$       # see go/owners
+  include: /BUILD$        # see go/build
+  include: /g3doc/        # see go/g3doc
+  include: /google_[^/]+  # treat files with google_ prefix as internal
+  exclude: .*
diff --git a/GOIMPORT/MANIFEST b/GOIMPORT/MANIFEST
new file mode 100644
index 0000000..6c15703
--- /dev/null
+++ b/GOIMPORT/MANIFEST
@@ -0,0 +1,220 @@
+# This file contains a manifest of all files imported by import.go.
+# This is for both human review and machine consumption.
+# Please see go/thirdpartygo for more information.
+# DO NOT EDIT. This must only be generated by //third_party/golang/import.go.
+
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx BUILD
++ 60dd515fdbe21333799a36526fd15e03 GOIMPORT/CONFIGURATION
++ 1d129443eba3475c1735a07bcbf76d3d GOIMPORT/MANIFEST
+= 9fc9e1d6b93fb3971a451d68e21a4437 LICENSE
++ 8ab9622f5371e34c3139ecd12aaface5 METADATA
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx OWNERS
+= 3ec7645ccbfe13331f1675d54034d89d README.md
+= 5f4dfa7393d07daf243d8d64eb715e8b decoder.go
+= a0c0a0fe9e9c757954bca07a18482c2a decoder_test.go
+= a6c7fbb988e532707b47bbd31daf4676 go.mod
+= 26a95cbd0a533fcf286f3b81e3951000 go.sum
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx google_init_test.go
+= 722482ceb1ca02766cbbe95643914f8d hcl.go
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx hcl/ast/BUILD
+= 44b6e0beab4e6ea45fb8ef461a2102a5 hcl/ast/ast.go
+= d25abe4589b26bdd1aace8c370e4000b hcl/ast/ast_test.go
+= 932d3d3d1c148dea42a577082c133f09 hcl/ast/walk.go
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx hcl/fmtcmd/BUILD
+= a6ca7d75fee4ae0f4bc8da2765b3bfda hcl/fmtcmd/fmtcmd.go
+= 293448883316328b60f25e96744dc0ec hcl/fmtcmd/fmtcmd_test.go
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx hcl/fmtcmd/google_init_test.go
+= 4ebc5b689879cb343cf69de8dd1b239e hcl/fmtcmd/test-fixtures/.hidden.ignore
+= 76fd57b31954cd40f63a680085902804 hcl/fmtcmd/test-fixtures/dir.ignore
+= 4ebc5b689879cb343cf69de8dd1b239e hcl/fmtcmd/test-fixtures/file.ignore
+= 76fd57b31954cd40f63a680085902804 hcl/fmtcmd/test-fixtures/good.hcl
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx hcl/parser/BUILD
+= fbe3d6ec123bbde05582543f34457337 hcl/parser/error.go
+= 6875dd035dfe822a20e3c03adf0c8e1b hcl/parser/error_test.go
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx hcl/parser/google_init_test.go
+= bceab46df86ccdc8337061cef8d831a7 hcl/parser/parser.go
+= 25e601243f364fa54341316ee78efa8d hcl/parser/parser_test.go
+= 1dfc0347731522f613daa68e834550ef hcl/parser/test-fixtures/array_comment.hcl
+= 2d26ccd1144832f195530dce250efcb1 hcl/parser/test-fixtures/array_comment_2.hcl
+= 041aa96c5f5999b918a63ed365afed80 hcl/parser/test-fixtures/assign_colon.hcl
+= 5cd9f072067f7ccf981450d4d394c2be hcl/parser/test-fixtures/assign_deep.hcl
+= c0fd3ca29599eeee8a8f1e776193aef2 hcl/parser/test-fixtures/comment.hcl
+= c0fd3ca29599eeee8a8f1e776193aef2 hcl/parser/test-fixtures/comment_crlf.hcl
+= a993e21d347d89f036f12cde555e094e hcl/parser/test-fixtures/comment_lastline.hcl
+= 6c4b17a554754808c8d43d58d662029e hcl/parser/test-fixtures/comment_single.hcl
+= 4c7f11c8a0e6c90de865b5d22fec4e09 hcl/parser/test-fixtures/complex.hcl
+= 4c7f11c8a0e6c90de865b5d22fec4e09 hcl/parser/test-fixtures/complex_crlf.hcl
+= 7579e28f3f1de969fdd8c0de3a7d76d9 hcl/parser/test-fixtures/complex_key.hcl
+= 76fd57b31954cd40f63a680085902804 hcl/parser/test-fixtures/empty.hcl
+= c2acd645cebd71b4f2f6114ba2004b49 hcl/parser/test-fixtures/git_crypt.hcl
+= d1adc46731e6372ee76a369bfb9030b9 hcl/parser/test-fixtures/key_without_value.hcl
+= 4123427f738137585ea756b93a9e7919 hcl/parser/test-fixtures/list.hcl
+= 5a82b989dfa7e4267d91b2fda2da51df hcl/parser/test-fixtures/list_comma.hcl
+= 589626f3ac4fe8812b1750aaa1832d0b hcl/parser/test-fixtures/missing_braces.hcl
+= 7a5a1a1320e3176e77bfaaa5c386dbb2 hcl/parser/test-fixtures/multiple.hcl
+= b1bd4325e8a5eeb0f466e9233666912b hcl/parser/test-fixtures/object_key_assign_without_value.hcl
+= e331b2512363f95541e90eebf550893f hcl/parser/test-fixtures/object_key_assign_without_value2.hcl
+= 8e8bea5b532647d0fc990f222537fd5e hcl/parser/test-fixtures/object_key_assign_without_value3.hcl
+= a81600fcb40ed3cd8145555a75702366 hcl/parser/test-fixtures/object_key_without_value.hcl
+= 2bdec03e74e76e717119bfd8588112d1 hcl/parser/test-fixtures/object_list_comma.hcl
+= a82a2dd0791cd30e15b8b7cde6f4b549 hcl/parser/test-fixtures/old.hcl
+= 0a2dc0adc957de57373d11f08649a60b hcl/parser/test-fixtures/structure.hcl
+= a83ef4a98eecf9536bf789f07b0f1800 hcl/parser/test-fixtures/structure_basic.hcl
+= 13d6e393d446aadb7a8384f65383bdab hcl/parser/test-fixtures/structure_empty.hcl
+= e62ae69675b3caf43c4310e4f6e591d2 hcl/parser/test-fixtures/types.hcl
+= cadd8c7c4e479a269ccd3b2236e82ecc hcl/parser/test-fixtures/unterminated_object.hcl
+= 7cd80de978a452249d21310b495f85a4 hcl/parser/test-fixtures/unterminated_object_2.hcl
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx hcl/printer/BUILD
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx hcl/printer/google_init_test.go
+= 05610a814db7a71dea8bb2333f4f59f1 hcl/printer/nodes.go
+= 241abe4522ada56c779db0dfb1f36634 hcl/printer/printer.go
+= 7fcc58e2f2a81c6b622eb50e942e68f9 hcl/printer/printer_test.go
+= bbd89ef2111b59ea8078f50d9269f44d hcl/printer/testdata/comment.golden
+= 2c8e19a7d2e355a0b29191d228c97dcb hcl/printer/testdata/comment.input
+= e7e83b9017185ca767cc6292521aece1 hcl/printer/testdata/comment_aligned.golden
+= 257a7a272d86450bbc0cf3e4f6d4a71a hcl/printer/testdata/comment_aligned.input
+= fecd8249c32383f35cb01c83e7d08658 hcl/printer/testdata/comment_array.golden
+= fecd8249c32383f35cb01c83e7d08658 hcl/printer/testdata/comment_array.input
+= cb906f156480209e7d5a4fe4138d550a hcl/printer/testdata/comment_crlf.input
+= fe439d93de634dbe99bb314ea839947b hcl/printer/testdata/comment_end_file.golden
+= f31530365ea05122d3f822161587fe6b hcl/printer/testdata/comment_end_file.input
+= 3b9e1432128a993baadb6115a40887e0 hcl/printer/testdata/comment_multiline_indent.golden
+= bcf231eacf497c938a87da9d4297b3d0 hcl/printer/testdata/comment_multiline_indent.input
+= ebfd38d7b81aa18d7da7d6b1b7ff4e7c hcl/printer/testdata/comment_multiline_no_stanza.golden
+= 19a8cf9f5aba7476c29c0144dbae5b7e hcl/printer/testdata/comment_multiline_no_stanza.input
+= 8bfd01668d990c35f2c1d46d9ce8a61e hcl/printer/testdata/comment_multiline_stanza.golden
+= 06ef2d7bb8d3fc53c9b95e59f5587de5 hcl/printer/testdata/comment_multiline_stanza.input
+= e32dace2d85db8a09dd6d71d7993edc5 hcl/printer/testdata/comment_newline.golden
+= 18a368b554c8c72a10ab40f8577e8124 hcl/printer/testdata/comment_newline.input
+= ec79f18071fb9438996c950bd8fe59f7 hcl/printer/testdata/comment_object_multi.golden
+= ec79f18071fb9438996c950bd8fe59f7 hcl/printer/testdata/comment_object_multi.input
+= a797b1fed2941dc0ca6fb4546bf0bddd hcl/printer/testdata/comment_standalone.golden
+= 2f896bc9e4c7d386ba044bf85ba906cf hcl/printer/testdata/comment_standalone.input
+= 66ecba01f50ec6214fe44cb722571be1 hcl/printer/testdata/complexhcl.golden
+= 50c90d101a22eef9b0a38b25e4540457 hcl/printer/testdata/complexhcl.input
+= c12caa37229b76a3556addce3dbe72a1 hcl/printer/testdata/empty_block.golden
+= 0ea7c97a8bbbc1f81545683a99e78297 hcl/printer/testdata/empty_block.input
+= 4944eefd0c0c83421fdf65228d155677 hcl/printer/testdata/list.golden
+= f9f35ddbacf403cae732aa103cd39f82 hcl/printer/testdata/list.input
+= d95aa28d3b9cc16b45a74456db3888c1 hcl/printer/testdata/list_comment.golden
+= 0dae3d6a2bda961747cb38614fdf9bd2 hcl/printer/testdata/list_comment.input
+= 4fa2253a72a295bcebc2331360afc7af hcl/printer/testdata/list_of_objects.golden
+= aa977954a156c04fd65906b16f3e69fe hcl/printer/testdata/list_of_objects.input
+= 13c78ba4f673ef1694d8f7e9fa9b6749 hcl/printer/testdata/multiline_string.golden
+= 13c78ba4f673ef1694d8f7e9fa9b6749 hcl/printer/testdata/multiline_string.input
+= 73ff55a8050673540269ff4aecce59b5 hcl/printer/testdata/object_singleline.golden
+= 7e9ba29e68c80234f9b36088e7d087f8 hcl/printer/testdata/object_singleline.input
+= 71c5ec0810945a81757a8976dad1aad8 hcl/printer/testdata/object_with_heredoc.golden
+= 4ade5a6190aff82b606cbafa9d8ac9c4 hcl/printer/testdata/object_with_heredoc.input
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx hcl/scanner/BUILD
+= 2b35eff032efe74168b9a02fdb9d3f19 hcl/scanner/scanner.go
+= a23ccff552b5d8c22ca4e2a28635a555 hcl/scanner/scanner_test.go
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx hcl/strconv/BUILD
+= 1d980f4a67b739d83a208e512daaeb72 hcl/strconv/quote.go
+= e0907bd84cc08174ce5f2f3695d31ccc hcl/strconv/quote_test.go
+= 1dfc0347731522f613daa68e834550ef hcl/test-fixtures/array_comment.hcl
+= 041aa96c5f5999b918a63ed365afed80 hcl/test-fixtures/assign_colon.hcl
+= c0fd3ca29599eeee8a8f1e776193aef2 hcl/test-fixtures/comment.hcl
+= 6c4b17a554754808c8d43d58d662029e hcl/test-fixtures/comment_single.hcl
+= 30205d5f034c3a4dadc8b82fbb3e07ff hcl/test-fixtures/complex.hcl
+= 7579e28f3f1de969fdd8c0de3a7d76d9 hcl/test-fixtures/complex_key.hcl
+= 76fd57b31954cd40f63a680085902804 hcl/test-fixtures/empty.hcl
+= 4123427f738137585ea756b93a9e7919 hcl/test-fixtures/list.hcl
+= 5a82b989dfa7e4267d91b2fda2da51df hcl/test-fixtures/list_comma.hcl
+= 7a5a1a1320e3176e77bfaaa5c386dbb2 hcl/test-fixtures/multiple.hcl
+= a82a2dd0791cd30e15b8b7cde6f4b549 hcl/test-fixtures/old.hcl
+= 0a2dc0adc957de57373d11f08649a60b hcl/test-fixtures/structure.hcl
+= a83ef4a98eecf9536bf789f07b0f1800 hcl/test-fixtures/structure_basic.hcl
+= 13d6e393d446aadb7a8384f65383bdab hcl/test-fixtures/structure_empty.hcl
+= e62ae69675b3caf43c4310e4f6e591d2 hcl/test-fixtures/types.hcl
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx hcl/token/BUILD
+= e960476bf7ff9361638e5a38e995e295 hcl/token/position.go
+= ca64d696104d026269b7edf9cf054391 hcl/token/token.go
+= 789fcd8c7cc86a9201d8384c91bcf900 hcl/token/token_test.go
+= 5597b50fc38a4700f360b301ef018e53 hcl_test.go
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx json/parser/BUILD
+= e6fd01837f50baef26ad5835c9cec949 json/parser/flatten.go
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx json/parser/google_init_test.go
+= c7de15da9b5b9e8b30d2554fd3a9aa47 json/parser/parser.go
+= 8ee59f54e032060539e643f27f20917f json/parser/parser_test.go
+= 23b4b686a3f51062dd488e1f03f82d6d json/parser/test-fixtures/array.json
+= be5fe65e7fa061f8f2a33871f9ede8cb json/parser/test-fixtures/bad_input_128.json
+= a7c4817249816e203c09e0cc4f77c49c json/parser/test-fixtures/bad_input_tf_8110.json
+= b13df1fa074f81475e6d3644297cef35 json/parser/test-fixtures/basic.json
+= f22db0163801402d1bc183b5b9503ef0 json/parser/test-fixtures/good_input_tf_8110.json
+= 9f7d5684d6f5a16fc88230d386f90b86 json/parser/test-fixtures/object.json
+= 88d60dc9fe41520e764d4e8abcb7466c json/parser/test-fixtures/types.json
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx json/scanner/BUILD
+= f60029d8a358bd9a78021a7b57e26da8 json/scanner/scanner.go
+= c4566556bb0d65360665267d37b2db80 json/scanner/scanner_test.go
+= 23b4b686a3f51062dd488e1f03f82d6d json/test-fixtures/array.json
+= b13df1fa074f81475e6d3644297cef35 json/test-fixtures/basic.json
+= 9f7d5684d6f5a16fc88230d386f90b86 json/test-fixtures/object.json
+= 88d60dc9fe41520e764d4e8abcb7466c json/test-fixtures/types.json
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx json/token/BUILD
+= e960476bf7ff9361638e5a38e995e295 json/token/position.go
+= cb7a8eef22996bcdcb5b2c1d624a02d5 json/token/token.go
+= 825d857974fe194c7fd31ede21d9d3b5 json/token/token_test.go
+= 1de42b4a0b1b96897bf98fee442d79eb lex.go
+= eecbe6ab9fd9d2e4a8112f5716d64342 lex_test.go
+= 624f60832f40ee2ee3484ce1b0229c2b parse.go
+= 5cd9f072067f7ccf981450d4d394c2be test-fixtures/assign_deep.hcl
+= 29eada54d9d67b8ea3176390af2869f6 test-fixtures/basic.hcl
+= 73261bc19c6516037316808dc9d0befb test-fixtures/basic.json
+= 2cfbed1438cb24a5dee8fa50745c1e71 test-fixtures/basic_bool.hcl
+= 5b4ef779c5007abca3d562b03fea6a4e test-fixtures/basic_bool_int.hcl
+= 495542dc1c801690a401eeec90960d2f test-fixtures/basic_bool_string.hcl
+= 83347a9c40a9d26a7840f3ce646fa275 test-fixtures/basic_int_string.hcl
+= 734b850cc4895d53e0bb0fa899600ad9 test-fixtures/basic_squish.hcl
+= 957a4def52e1468cc7eb0d83b6fdd963 test-fixtures/block_assign.hcl
+= 286a25b84c3eab07f45a121c2d160ca0 test-fixtures/decode_policy.hcl
+= abdd42d7dcbbe1f12fc447573cc903cc test-fixtures/decode_policy.json
+= fe0f6540b1b86750204947564358a851 test-fixtures/decode_tf_variable.hcl
+= 92e385416508547555ca4a989b30bb56 test-fixtures/decode_tf_variable.json
+= 1fabb2bfc27ceb32fa3ed8f8ee58aa98 test-fixtures/empty.hcl
+= e37ca34675fcfbffaa663d361717c0af test-fixtures/escape.hcl
+= 44f20a673653460b31a26de265804f7c test-fixtures/escape_backslash.hcl
+= fee0fd0d8e4fbb50aa91b181d0d50145 test-fixtures/flat.hcl
+= c02d1db46dab9bcad8c923ae32f825fe test-fixtures/float.hcl
+= 3b051832cff276edfa8cc0bc34ed3390 test-fixtures/float.json
+= c2acd645cebd71b4f2f6114ba2004b49 test-fixtures/git_crypt.hcl
+= f323a5a66b97133fe9873d5f25f581bc test-fixtures/interpolate.json
+= 014c8374769948b73fc9261f3bcc0856 test-fixtures/list_of_lists.hcl
+= 92514f9c83762f231292bd170f5f9aad test-fixtures/list_of_maps.hcl
+= fbb330ae7d53b284277dd4a643f39645 test-fixtures/multiline.hcl
+= bc62cc41a6e524ed972939290d32d10f test-fixtures/multiline.json
+= c3246ea42189a117535cc477e73a0ec7 test-fixtures/multiline_bad.hcl
+= 6d26bc1dbede4bd3fadbba1878e06ed3 test-fixtures/multiline_indented.hcl
+= 4bd8b5fce12c5f88818c4600c3c928e3 test-fixtures/multiline_literal.hcl
+= 701b0e055a50675019e1ad2049575e92 test-fixtures/multiline_literal_with_hil.hcl
+= eb09886f668838a89ba58ca14b4f89cf test-fixtures/multiline_no_eof.hcl
+= 84b1ca96e02270bfbaf0a2c934014825 test-fixtures/multiline_no_hanging_indent.hcl
+= 2ff7199077b91504b903545c579ea3fd test-fixtures/multiline_no_marker.hcl
+= a66ba6f0e59a5eb215af45e6349aa81c test-fixtures/nested_block_comment.hcl
+= 804a4a7ef1483a861c02f0b8289da085 test-fixtures/nested_provider_bad.hcl
+= a6c78ebbe0c7532d6ae4fb26543971af test-fixtures/null_strings.json
+= 0bb77712eea08ab942b12073f3f7a502 test-fixtures/object_list.json
+= 116d990cc004f41260f9ff9f052b1b79 test-fixtures/object_with_bool.hcl
+= 68fac30aa38a28a980f58abbde3f6c5f test-fixtures/scientific.hcl
+= 851dd6d3574f3f98e9fe03138d2cbe80 test-fixtures/scientific.json
+= a4ec8001034538c2cbe5373acd5fd7be test-fixtures/slice_expand.hcl
+= 59742383caf2d5ca57432261a33859a3 test-fixtures/structure.hcl
+= bd784f1189b7071ee87a87a63e9a7fce test-fixtures/structure.json
+= 668d5c51223492336da099e854b84705 test-fixtures/structure2.hcl
+= 68fff6ceef095e1858f61e3c9af2d3ac test-fixtures/structure2.json
+= e94e16ff1b2482f5df6f0161b6238ab7 test-fixtures/structure_flat.json
+= 135a21b6f7d5ace105341b850f1b343f test-fixtures/structure_flatmap.hcl
+= dc74efb3e7cdcb2ff6786ca7b02c8d41 test-fixtures/structure_list.hcl
+= 29af1f378b98706145828c1208056072 test-fixtures/structure_list.json
+= 098031534eb2336e7eef45a3da3405dc test-fixtures/structure_list_deep.json
+= bcfd74b616eb96bcbb4d2c05d752d521 test-fixtures/structure_list_empty.json
+= 8cf3c484b10d4f28c2f540ff0cc08532 test-fixtures/structure_multi.hcl
+= 366b191aa78b985afb8f32e9536bdf46 test-fixtures/structure_multi.json
+= 5a8f8b312ee9a4c6d15bae25d5c4a06f test-fixtures/terraform_heroku.hcl
+= ee4620e46a32ec08464e29e7b5b3cc35 test-fixtures/terraform_heroku.json
+= b590828312c234635bd3b214c13c9fdc test-fixtures/terraform_variable_invalid.json
+= a829f261769e65dceca35c86772c11de test-fixtures/tfvars.hcl
+= 414769c1972a6fdd83d61f1cec7c7605 test-fixtures/unterminated_block_comment.hcl
+= cadd8c7c4e479a269ccd3b2236e82ecc test-fixtures/unterminated_brace.hcl
++ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx testhelper/BUILD
+= 81ecf2c8ae9979598dbd8e2bc2cc7444 testhelper/unix2dos.go
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of  any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..c822332
--- /dev/null
+++ b/README.md
@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human- and machine-friendly for use with command-line tools,
+specifically targeting DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make HCL-based systems
+interoperable with other tooling.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar formats.
+
+## Why?
+
+A common question about HCL is: why not JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON strikes a nice balance here, but it is fairly verbose and, most
+importantly, doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+more often than not ended up guessing whether to use a hyphen, colon, etc.
+to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+that a configuration language shouldn't usually allow, and they also
+force people to learn at least a subset of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+  * Single line comments start with `#` or `//`
+
+  * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+    are not allowed. A multi-line comment (also known as a block comment)
+    terminates at the first `*/` found.
+
+  * Values are assigned with the syntax `key = value` (whitespace doesn't
+    matter). The value can be a primitive (string, number, or boolean),
+    an object, or a list.
+
+  * Strings are double-quoted and can contain any UTF-8 characters.
+    Example: `"Hello, World"`
+
+  * Multi-line strings start with `<<EOF` at the end of a line, and end
+    with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
+    Any text may be used in place of `EOF`. Example:
+```
+<<FOO
+hello
+world
+FOO
+```
+
+  * Numbers are assumed to be base 10. If you prefix a number with `0x`,
+    it is treated as hexadecimal. If it is prefixed with `0`, it is
+    treated as octal. Numbers can be in scientific notation: `1e10`.
+    (See the combined example after this list.)
+
+  * Boolean values: `true`, `false`
+
+  * Arrays are made by wrapping values in `[]`. Example:
+    `["foo", "bar", 42]`. Arrays can contain primitives,
+    other arrays, and objects. As an alternative, lists
+    of objects can be created with repeated blocks, using
+    this structure:
+
+    ```hcl
+    service {
+        key = "value"
+    }
+
+    service {
+        key = "value"
+    }
+    ```
+
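+A quick sketch combining the number forms described above (key names are
+arbitrary; the first three all decode to the same value, 42):
+
+```hcl
+decimal    = 42
+hex        = 0x2A
+octal      = 052
+scientific = 1e10
+```
+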
+Objects and nested objects are created using the structure shown below:
+
+```
+variable "ami" {
+    description = "the AMI to use"
+}
+```
+This would be equivalent to the following JSON:
+``` json
+{
+  "variable": {
+      "ami": {
+          "description": "the AMI to use"
+        }
+    }
+}
+```
+
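+On the Go side, this package's `Decode` function maps HCL text into Go
+values via `hcl` struct tags. A minimal sketch, assuming the package is
+imported as `hcl`; the struct and field names are illustrative, not part
+of the library:
+
+```go
+// Variable mirrors a `variable "<name>" { ... }` block; the ",key" tag
+// captures the block label ("ami" below).
+type Variable struct {
+	Name        string `hcl:",key"`
+	Description string `hcl:"description"`
+}
+
+type Config struct {
+	Variables []Variable `hcl:"variable"`
+}
+
+var c Config
+err := hcl.Decode(&c, `variable "ami" { description = "the AMI to use" }`)
+```
+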
+## Thanks
+
+Thanks to:
+
+  * [@vstakhov](https://github.com/vstakhov) - The original libucl parser
+    and syntax that HCL was based on.
+
+  * [@fatih](https://github.com/fatih) - The rewritten HCL parser
+    in pure Go (no goyacc) and support for a printer.
diff --git a/decoder.go b/decoder.go
new file mode 100644
index 0000000..0f0514f
--- /dev/null
+++ b/decoder.go
@@ -0,0 +1,736 @@
+package hcl
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/ast/ast"
+	"google3/third_party/golang/hashicorp/hcl/hcl/parser/parser"
+	"google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+)
+
+// tagName is the struct field tag used to configure HCL decoding.
+const tagName = "hcl"
+
+var (
+	// nodeType holds a reference to the type of ast.Node
+	nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+	root, err := parse(bs)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
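+//
+// A minimal usage sketch (the struct and tag below are illustrative):
+//
+//	var out struct {
+//		Foo string `hcl:"foo"`
+//	}
+//	err := Decode(&out, `foo = "bar"`)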
+func Decode(out interface{}, in string) error {
+	obj, err := Parse(in)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+	val := reflect.ValueOf(out)
+	if val.Kind() != reflect.Ptr {
+		return errors.New("result must be a pointer")
+	}
+
+	// If we have the file, we really decode the root node
+	if f, ok := n.(*ast.File); ok {
+		n = f.Node
+	}
+
+	var d decoder
+	return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+	stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+	k := result
+
+	// If we have an interface with a valid value, we use that
+	// for the check.
+	if result.Kind() == reflect.Interface {
+		elem := result.Elem()
+		if elem.IsValid() {
+			k = elem
+		}
+	}
+
+	// Push current onto stack unless it is an interface.
+	if k.Kind() != reflect.Interface {
+		d.stack = append(d.stack, k.Kind())
+
+		// Schedule a pop
+		defer func() {
+			d.stack = d.stack[:len(d.stack)-1]
+		}()
+	}
+
+	switch k.Kind() {
+	case reflect.Bool:
+		return d.decodeBool(name, node, result)
+	case reflect.Float32, reflect.Float64:
+		return d.decodeFloat(name, node, result)
+	case reflect.Int, reflect.Int32, reflect.Int64:
+		return d.decodeInt(name, node, result)
+	case reflect.Interface:
+		// When we see an interface, we make our own thing
+		return d.decodeInterface(name, node, result)
+	case reflect.Map:
+		return d.decodeMap(name, node, result)
+	case reflect.Ptr:
+		return d.decodePtr(name, node, result)
+	case reflect.Slice:
+		return d.decodeSlice(name, node, result)
+	case reflect.String:
+		return d.decodeString(name, node, result)
+	case reflect.Struct:
+		return d.decodeStruct(name, node, result)
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+		}
+	}
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.BOOL, token.STRING, token.NUMBER:
+			var v bool
+			s := strings.ToLower(strings.Replace(n.Token.Text, "\"", "", -1))
+			switch s {
+			case "1", "true":
+				v = true
+			case "0", "false":
+				v = false
+			default:
+				return fmt.Errorf("decodeBool: Unknown value for boolean: %s", n.Token.Text)
+			}
+
+			result.Set(reflect.ValueOf(v))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
+			v, err := strconv.ParseFloat(n.Token.Text, 64)
+			if err != nil {
+				return err
+			}
+
+			result.Set(reflect.ValueOf(v).Convert(result.Type()))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.NUMBER:
+			v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+			if err != nil {
+				return err
+			}
+
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
+			return nil
+		case token.STRING:
+			v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+			if err != nil {
+				return err
+			}
+
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+	// When we see an ast.Node, we retain the value to enable deferred decoding.
+	// This is very useful in situations where we want to preserve ast.Node
+	// information such as Pos.
+	if result.Type() == nodeType && result.CanSet() {
+		result.Set(reflect.ValueOf(node))
+		return nil
+	}
+
+	var set reflect.Value
+	redecode := true
+
+	// For the type switch below, an ObjectType should just be treated as its
+	// list. We use a temporary var because we still want to pass the real node.
+	testNode := node
+	if ot, ok := node.(*ast.ObjectType); ok {
+		testNode = ot.List
+	}
+
+	switch n := testNode.(type) {
+	case *ast.ObjectList:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+			set = result
+		}
+	case *ast.ObjectType:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+			set = result
+		}
+	case *ast.ListType:
+		var temp []interface{}
+		tempVal := reflect.ValueOf(temp)
+		result := reflect.MakeSlice(
+			reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+		set = result
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.BOOL:
+			var result bool
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.FLOAT:
+			var result float64
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.NUMBER:
+			var result int
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.STRING, token.HEREDOC:
+			set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+		default:
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+			}
+		}
+	default:
+		return fmt.Errorf(
+			"%s: cannot decode into interface: %T",
+			name, node)
+	}
+
+	// Set the result to what it's supposed to be, then reset
+	// result so we don't reflect into this method anymore.
+	result.Set(set)
+
+	if redecode {
+		// Revisit the node so that we can use the newly instantiated
+		// thing and populate it.
+		if err := d.decode(name, node, result); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+	if item, ok := node.(*ast.ObjectItem); ok {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	n, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+		}
+	}
+
+	// If we have an interface, then we can address the interface,
+	// but not the slice itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	resultKeyType := resultType.Key()
+	if resultKeyType.Kind() != reflect.String {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: map must have string keys", name),
+		}
+	}
+
+	// Make a map if it is nil
+	resultMap := result
+	if result.IsNil() {
+		resultMap = reflect.MakeMap(
+			reflect.MapOf(resultKeyType, resultElemType))
+	}
+
+	// Go through each element and decode it.
+	done := make(map[string]struct{})
+	for _, item := range n.Items {
+		if item.Val == nil {
+			continue
+		}
+
+		// github.com/hashicorp/terraform/issue/5740
+		if len(item.Keys) == 0 {
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: map must have string keys", name),
+			}
+		}
+
+		// Get the key we're dealing with, which is the first item
+		keyStr := item.Keys[0].Token.Value().(string)
+
+		// If we've already processed this key, then ignore it
+		if _, ok := done[keyStr]; ok {
+			continue
+		}
+
+		// Determine the value. If we have more than one key, then we
+		// get the objectlist of only these keys.
+		itemVal := item.Val
+		if len(item.Keys) > 1 {
+			itemVal = n.Filter(keyStr)
+			done[keyStr] = struct{}{}
+		}
+
+		// Make the field name
+		fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+		// Get the key/value as reflection values
+		key := reflect.ValueOf(keyStr)
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// If we have a pre-existing value in the map, use that
+		oldVal := resultMap.MapIndex(key)
+		if oldVal.IsValid() {
+			val.Set(oldVal)
+		}
+
+		// Decode!
+		if err := d.decode(fieldName, itemVal, val); err != nil {
+			return err
+		}
+
+		// Set the value on the map
+		resultMap.SetMapIndex(key, val)
+	}
+
+	// Set the final map if we can
+	set.Set(resultMap)
+	return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	val := reflect.New(resultElemType)
+	if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+		return err
+	}
+
+	result.Set(val)
+	return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+	// If we have an interface, then we can address the interface,
+	// but not the slice itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+	// Create the slice if it is nil
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	if result.IsNil() {
+		resultSliceType := reflect.SliceOf(resultElemType)
+		result = reflect.MakeSlice(
+			resultSliceType, 0, 0)
+	}
+
+	// Figure out the items we'll be copying into the slice
+	var items []ast.Node
+	switch n := node.(type) {
+	case *ast.ObjectList:
+		items = make([]ast.Node, len(n.Items))
+		for i, item := range n.Items {
+			items[i] = item
+		}
+	case *ast.ObjectType:
+		items = []ast.Node{n}
+	case *ast.ListType:
+		items = n.List
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("unknown slice type: %T", node),
+		}
+	}
+
+	for i, item := range items {
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+		// Decode
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// if item is an object that was decoded from ambiguous JSON and
+		// flattened, make sure it's expanded if it needs to decode into a
+		// defined structure.
+		item := expandObject(item, val)
+
+		if err := d.decode(fieldName, item, val); err != nil {
+			return err
+		}
+
+		// Append it onto the slice
+		result = reflect.Append(result, val)
+	}
+
+	set.Set(result)
+	return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a list that
+// should be decoded into a struct, and expands the ast so it decodes properly.
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+	item, ok := node.(*ast.ObjectItem)
+	if !ok {
+		return node
+	}
+
+	elemType := result.Type()
+
+	// our target type must be a struct
+	switch elemType.Kind() {
+	case reflect.Ptr:
+		switch elemType.Elem().Kind() {
+		case reflect.Struct:
+			//OK
+		default:
+			return node
+		}
+	case reflect.Struct:
+		//OK
+	default:
+		return node
+	}
+
+	// A list value will have a key and field name. If it had more fields,
+	// it wouldn't have been flattened.
+	if len(item.Keys) != 2 {
+		return node
+	}
+
+	keyToken := item.Keys[0].Token
+	item.Keys = item.Keys[1:]
+
+	// we need to un-flatten the ast enough to decode
+	newNode := &ast.ObjectItem{
+		Keys: []*ast.ObjectKey{
+			&ast.ObjectKey{
+				Token: keyToken,
+			},
+		},
+		Val: &ast.ObjectType{
+			List: &ast.ObjectList{
+				Items: []*ast.ObjectItem{item},
+			},
+		},
+	}
+
+	return newNode
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.NUMBER:
+			result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+			return nil
+		case token.STRING, token.HEREDOC:
+			result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+	}
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+	var item *ast.ObjectItem
+	if it, ok := node.(*ast.ObjectItem); ok {
+		item = it
+		node = it.Val
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	// Handle the special case where the object itself is a literal. Previously
+	// the yacc parser would always ensure top-level elements were arrays. The new
+	// parser does not make the same guarantees, thus we need to convert any
+	// top-level literal elements into a list.
+	if _, ok := node.(*ast.LiteralType); ok && item != nil {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	list, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+		}
+	}
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = result
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+	type field struct {
+		field reflect.StructField
+		val   reflect.Value
+	}
+	fields := []field{}
+	for len(structs) > 0 {
+		structVal := structs[0]
+		structs = structs[1:]
+
+		structType := structVal.Type()
+		for i := 0; i < structType.NumField(); i++ {
+			fieldType := structType.Field(i)
+			tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+
+			// Ignore fields with tag name "-"
+			if tagParts[0] == "-" {
+				continue
+			}
+
+			if fieldType.Anonymous {
+				fieldKind := fieldType.Type.Kind()
+				if fieldKind != reflect.Struct {
+					return &parser.PosError{
+						Pos: node.Pos(),
+						Err: fmt.Errorf("%s: unsupported type to struct: %s",
+							fieldType.Name, fieldKind),
+					}
+				}
+
+				// We have an embedded field. We "squash" the fields down
+				// if specified in the tag.
+				squash := false
+				for _, tag := range tagParts[1:] {
+					if tag == "squash" {
+						squash = true
+						break
+					}
+				}
+
+				if squash {
+					structs = append(
+						structs, result.FieldByName(fieldType.Name))
+					continue
+				}
+			}
+
+			// Normal struct field, store it away
+			fields = append(fields, field{fieldType, structVal.Field(i)})
+		}
+	}
+
+	usedKeys := make(map[string]struct{})
+	decodedFields := make([]string, 0, len(fields))
+	decodedFieldsVal := make([]reflect.Value, 0)
+	unusedKeysVal := make([]reflect.Value, 0)
+	for _, f := range fields {
+		field, fieldValue := f.field, f.val
+		if !fieldValue.IsValid() {
+			// This should never happen
+			panic("field is not valid")
+		}
+
+		// If we can't set the field, then it is unexported or something,
+		// and we just continue onwards.
+		if !fieldValue.CanSet() {
+			continue
+		}
+
+		fieldName := field.Name
+
+		tagValue := field.Tag.Get(tagName)
+		tagParts := strings.SplitN(tagValue, ",", 2)
+		if len(tagParts) >= 2 {
+			switch tagParts[1] {
+			case "decodedFields":
+				decodedFieldsVal = append(decodedFieldsVal, fieldValue)
+				continue
+			case "key":
+				if item == nil {
+					return &parser.PosError{
+						Pos: node.Pos(),
+						Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+							name, fieldName),
+					}
+				}
+
+				fieldValue.SetString(item.Keys[0].Token.Value().(string))
+				continue
+			case "unusedKeys":
+				unusedKeysVal = append(unusedKeysVal, fieldValue)
+				continue
+			}
+		}
+
+		if tagParts[0] != "" {
+			fieldName = tagParts[0]
+		}
+
+		// Determine the element we'll use to decode. If it is a single
+		// match (only object with the field), then we decode it exactly.
+		// If it is a prefix match, then we decode the matches.
+		filter := list.Filter(fieldName)
+
+		prefixMatches := filter.Children()
+		matches := filter.Elem()
+		if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+			continue
+		}
+
+		// Track the used key
+		usedKeys[fieldName] = struct{}{}
+
+		// Create the field name and decode. We range over the elements
+		// because we actually want the value.
+		fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+		if len(prefixMatches.Items) > 0 {
+			if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
+				return err
+			}
+		}
+		for _, match := range matches.Items {
+			var decodeNode ast.Node = match.Val
+			if ot, ok := decodeNode.(*ast.ObjectType); ok {
+				decodeNode = &ast.ObjectList{Items: ot.List.Items}
+			}
+
+			if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
+				return err
+			}
+		}
+
+		decodedFields = append(decodedFields, field.Name)
+	}
+
+	if len(decodedFieldsVal) > 0 {
+		// Sort it so that it is deterministic
+		sort.Strings(decodedFields)
+
+		for _, v := range decodedFieldsVal {
+			v.Set(reflect.ValueOf(decodedFields))
+		}
+	}
+
+	return nil
+}
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+	var nodeContainer struct {
+		Node ast.Node
+	}
+	value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+	return value.Type()
+}
diff --git a/decoder_test.go b/decoder_test.go
new file mode 100644
index 0000000..d81f52f
--- /dev/null
+++ b/decoder_test.go
@@ -0,0 +1,1248 @@
+package hcl
+
+import (
+	"io/ioutil"
+	"path/filepath"
+	"reflect"
+	"testing"
+	"time"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/ast/ast"
+	"google3/third_party/golang/spew/spew"
+)
+
+func TestDecode_interface(t *testing.T) {
+	cases := []struct {
+		File string
+		Err  bool
+		Out  interface{}
+	}{
+		{
+			"basic.hcl",
+			false,
+			map[string]interface{}{
+				"foo": "bar",
+				"bar": "${file(\"bing/bong.txt\")}",
+			},
+		},
+		{
+			"basic_squish.hcl",
+			false,
+			map[string]interface{}{
+				"foo":     "bar",
+				"bar":     "${file(\"bing/bong.txt\")}",
+				"foo-bar": "baz",
+			},
+		},
+		{
+			"empty.hcl",
+			false,
+			map[string]interface{}{
+				"resource": []map[string]interface{}{
+					map[string]interface{}{
+						"foo": []map[string]interface{}{
+							map[string]interface{}{},
+						},
+					},
+				},
+			},
+		},
+		{
+			"tfvars.hcl",
+			false,
+			map[string]interface{}{
+				"regularvar": "Should work",
+				"map.key1":   "Value",
+				"map.key2":   "Other value",
+			},
+		},
+		{
+			"escape.hcl",
+			false,
+			map[string]interface{}{
+				"foo":          "bar\"baz\\n",
+				"qux":          "back\\slash",
+				"bar":          "new\nline",
+				"qax":          `slash\:colon`,
+				"nested":       `${HH\\:mm\\:ss}`,
+				"nestedquotes": `${"\"stringwrappedinquotes\""}`,
+			},
+		},
+		{
+			"float.hcl",
+			false,
+			map[string]interface{}{
+				"a": 1.02,
+				"b": 2,
+			},
+		},
+		{
+			"multiline_bad.hcl",
+			true,
+			nil,
+		},
+		{
+			"multiline_literal.hcl",
+			true,
+			nil,
+		},
+		{
+			"multiline_literal_with_hil.hcl",
+			false,
+			map[string]interface{}{"multiline_literal_with_hil": "${hello\n  world}"},
+		},
+		{
+			"multiline_no_marker.hcl",
+			true,
+			nil,
+		},
+		{
+			"multiline.hcl",
+			false,
+			map[string]interface{}{"foo": "bar\nbaz\n"},
+		},
+		{
+			"multiline_indented.hcl",
+			false,
+			map[string]interface{}{"foo": "  bar\n  baz\n"},
+		},
+		{
+			"multiline_no_hanging_indent.hcl",
+			false,
+			map[string]interface{}{"foo": "  baz\n    bar\n      foo\n"},
+		},
+		{
+			"multiline_no_eof.hcl",
+			false,
+			map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"},
+		},
+		{
+			"multiline.json",
+			false,
+			map[string]interface{}{"foo": "bar\nbaz"},
+		},
+		{
+			"null_strings.json",
+			false,
+			map[string]interface{}{
+				"module": []map[string]interface{}{
+					map[string]interface{}{
+						"app": []map[string]interface{}{
+							map[string]interface{}{"foo": ""},
+						},
+					},
+				},
+			},
+		},
+		{
+			"scientific.json",
+			false,
+			map[string]interface{}{
+				"a": 1e-10,
+				"b": 1e+10,
+				"c": 1e10,
+				"d": 1.2e-10,
+				"e": 1.2e+10,
+				"f": 1.2e10,
+			},
+		},
+		{
+			"scientific.hcl",
+			false,
+			map[string]interface{}{
+				"a": 1e-10,
+				"b": 1e+10,
+				"c": 1e10,
+				"d": 1.2e-10,
+				"e": 1.2e+10,
+				"f": 1.2e10,
+			},
+		},
+		{
+			"terraform_heroku.hcl",
+			false,
+			map[string]interface{}{
+				"name": "terraform-test-app",
+				"config_vars": []map[string]interface{}{
+					map[string]interface{}{
+						"FOO": "bar",
+					},
+				},
+			},
+		},
+		{
+			"structure_multi.hcl",
+			false,
+			map[string]interface{}{
+				"foo": []map[string]interface{}{
+					map[string]interface{}{
+						"baz": []map[string]interface{}{
+							map[string]interface{}{"key": 7},
+						},
+					},
+					map[string]interface{}{
+						"bar": []map[string]interface{}{
+							map[string]interface{}{"key": 12},
+						},
+					},
+				},
+			},
+		},
+		{
+			"structure_multi.json",
+			false,
+			map[string]interface{}{
+				"foo": []map[string]interface{}{
+					map[string]interface{}{
+						"baz": []map[string]interface{}{
+							map[string]interface{}{"key": 7},
+						},
+					},
+					map[string]interface{}{
+						"bar": []map[string]interface{}{
+							map[string]interface{}{"key": 12},
+						},
+					},
+				},
+			},
+		},
+		{
+			"list_of_lists.hcl",
+			false,
+			map[string]interface{}{
+				"foo": []interface{}{
+					[]interface{}{"foo"},
+					[]interface{}{"bar"},
+				},
+			},
+		},
+		{
+			"list_of_maps.hcl",
+			false,
+			map[string]interface{}{
+				"foo": []interface{}{
+					map[string]interface{}{"somekey1": "someval1"},
+					map[string]interface{}{"somekey2": "someval2", "someextrakey": "someextraval"},
+				},
+			},
+		},
+		{
+			"assign_deep.hcl",
+			false,
+			map[string]interface{}{
+				"resource": []interface{}{
+					map[string]interface{}{
+						"foo": []interface{}{
+							map[string]interface{}{
+								"bar": []map[string]interface{}{
+									map[string]interface{}{}}}}}}},
+		},
+		{
+			"structure_list.hcl",
+			false,
+			map[string]interface{}{
+				"foo": []map[string]interface{}{
+					map[string]interface{}{
+						"key": 7,
+					},
+					map[string]interface{}{
+						"key": 12,
+					},
+				},
+			},
+		},
+		{
+			"structure_list.json",
+			false,
+			map[string]interface{}{
+				"foo": []map[string]interface{}{
+					map[string]interface{}{
+						"key": 7,
+					},
+					map[string]interface{}{
+						"key": 12,
+					},
+				},
+			},
+		},
+		{
+			"structure_list_deep.json",
+			false,
+			map[string]interface{}{
+				"bar": []map[string]interface{}{
+					map[string]interface{}{
+						"foo": []map[string]interface{}{
+							map[string]interface{}{
+								"name": "terraform_example",
+								"ingress": []map[string]interface{}{
+									map[string]interface{}{
+										"from_port": 22,
+									},
+									map[string]interface{}{
+										"from_port": 80,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+
+		{
+			"structure_list_empty.json",
+			false,
+			map[string]interface{}{
+				"foo": []interface{}{},
+			},
+		},
+
+		{
+			"nested_block_comment.hcl",
+			false,
+			map[string]interface{}{
+				"bar": "value",
+			},
+		},
+
+		{
+			"unterminated_block_comment.hcl",
+			true,
+			nil,
+		},
+
+		{
+			"unterminated_brace.hcl",
+			true,
+			nil,
+		},
+
+		{
+			"nested_provider_bad.hcl",
+			true,
+			nil,
+		},
+
+		{
+			"object_list.json",
+			false,
+			map[string]interface{}{
+				"resource": []map[string]interface{}{
+					map[string]interface{}{
+						"aws_instance": []map[string]interface{}{
+							map[string]interface{}{
+								"db": []map[string]interface{}{
+									map[string]interface{}{
+										"vpc": "foo",
+										"provisioner": []map[string]interface{}{
+											map[string]interface{}{
+												"file": []map[string]interface{}{
+													map[string]interface{}{
+														"source":      "foo",
+														"destination": "bar",
+													},
+												},
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+
+		// Terraform GH-8295 sanity test that basic decoding into
+		// interface{} works.
+		{
+			"terraform_variable_invalid.json",
+			false,
+			map[string]interface{}{
+				"variable": []map[string]interface{}{
+					map[string]interface{}{
+						"whatever": "abc123",
+					},
+				},
+			},
+		},
+
+		{
+			"interpolate.json",
+			false,
+			map[string]interface{}{
+				"default": `${replace("europe-west", "-", " ")}`,
+			},
+		},
+
+		{
+			"block_assign.hcl",
+			true,
+			nil,
+		},
+
+		{
+			"escape_backslash.hcl",
+			false,
+			map[string]interface{}{
+				"output": []map[string]interface{}{
+					map[string]interface{}{
+						"one":  `${replace(var.sub_domain, ".", "\\.")}`,
+						"two":  `${replace(var.sub_domain, ".", "\\\\.")}`,
+						"many": `${replace(var.sub_domain, ".", "\\\\\\\\.")}`,
+					},
+				},
+			},
+		},
+
+		{
+			"git_crypt.hcl",
+			true,
+			nil,
+		},
+
+		{
+			"object_with_bool.hcl",
+			false,
+			map[string]interface{}{
+				"path": []map[string]interface{}{
+					map[string]interface{}{
+						"policy": "write",
+						"permissions": []map[string]interface{}{
+							map[string]interface{}{
+								"bool": []interface{}{false},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.File, func(t *testing.T) {
+			d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File))
+			if err != nil {
+				t.Fatalf("err: %s", err)
+			}
+
+			var out interface{}
+			err = Decode(&out, string(d))
+			if (err != nil) != tc.Err {
+				t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
+			}
+
+			if !reflect.DeepEqual(out, tc.Out) {
+				t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
+			}
+
+			var v interface{}
+			err = Unmarshal(d, &v)
+			if (err != nil) != tc.Err {
+				t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
+			}
+
+			if !reflect.DeepEqual(v, tc.Out) {
+				t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, v, tc.Out)
+			}
+		})
+	}
+}
+
+func TestDecode_interfaceInline(t *testing.T) {
+	cases := []struct {
+		Value string
+		Err   bool
+		Out   interface{}
+	}{
+		{"t t e{{}}", true, nil},
+		{"t=0t d {}", true, map[string]interface{}{"t": 0}},
+		{"v=0E0v d{}", true, map[string]interface{}{"v": float64(0)}},
+	}
+
+	for _, tc := range cases {
+		t.Logf("Testing: %q", tc.Value)
+
+		var out interface{}
+		err := Decode(&out, tc.Value)
+		if (err != nil) != tc.Err {
+			t.Fatalf("Input: %q\n\nError: %s", tc.Value, err)
+		}
+
+		if !reflect.DeepEqual(out, tc.Out) {
+			t.Fatalf("Input: %q. Actual, Expected.\n\n%#v\n\n%#v", tc.Value, out, tc.Out)
+		}
+
+		var v interface{}
+		err = Unmarshal([]byte(tc.Value), &v)
+		if (err != nil) != tc.Err {
+			t.Fatalf("Input: %q\n\nError: %s", tc.Value, err)
+		}
+
+		if !reflect.DeepEqual(v, tc.Out) {
+			t.Fatalf("Input: %q. Actual, Expected.\n\n%#v\n\n%#v", tc.Value, v, tc.Out)
+		}
+	}
+}
+
+func TestDecode_equal(t *testing.T) {
+	cases := []struct {
+		One, Two string
+	}{
+		{
+			"basic.hcl",
+			"basic.json",
+		},
+		{
+			"float.hcl",
+			"float.json",
+		},
+		/*
+			{
+				"structure.hcl",
+				"structure.json",
+			},
+		*/
+		{
+			"structure.hcl",
+			"structure_flat.json",
+		},
+		{
+			"terraform_heroku.hcl",
+			"terraform_heroku.json",
+		},
+	}
+
+	for _, tc := range cases {
+		p1 := filepath.Join(fixtureDir, tc.One)
+		p2 := filepath.Join(fixtureDir, tc.Two)
+
+		d1, err := ioutil.ReadFile(p1)
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		d2, err := ioutil.ReadFile(p2)
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		var i1, i2 interface{}
+		err = Decode(&i1, string(d1))
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		err = Decode(&i2, string(d2))
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		if !reflect.DeepEqual(i1, i2) {
+			t.Fatalf(
+				"%s != %s\n\n%#v\n\n%#v",
+				tc.One, tc.Two,
+				i1, i2)
+		}
+	}
+}
+
+func TestDecode_flatMap(t *testing.T) {
+	var val map[string]map[string]string
+
+	err := Decode(&val, testReadFile(t, "structure_flatmap.hcl"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	expected := map[string]map[string]string{
+		"foo": map[string]string{
+			"foo": "bar",
+			"key": "7",
+		},
+	}
+
+	if !reflect.DeepEqual(val, expected) {
+		t.Fatalf("Actual: %#v\n\nExpected: %#v", val, expected)
+	}
+}
+
+func TestDecode_structure(t *testing.T) {
+	type Embedded interface{}
+
+	type V struct {
+		Embedded `hcl:"-"`
+		Key      int
+		Foo      string
+	}
+
+	var actual V
+
+	err := Decode(&actual, testReadFile(t, "flat.hcl"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	expected := V{
+		Key: 7,
+		Foo: "bar",
+	}
+
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
+	}
+}
+
+func TestDecode_structurePtr(t *testing.T) {
+	type V struct {
+		Key int
+		Foo string
+	}
+
+	var actual *V
+
+	err := Decode(&actual, testReadFile(t, "flat.hcl"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	expected := &V{
+		Key: 7,
+		Foo: "bar",
+	}
+
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
+	}
+}
+
+func TestDecode_structureArray(t *testing.T) {
+	// This test is extracted from a failure in Consul (consul.io),
+	// hence the interesting structure naming.
+
+	type KeyPolicyType string
+
+	type KeyPolicy struct {
+		Prefix string `hcl:",key"`
+		Policy KeyPolicyType
+	}
+
+	type Policy struct {
+		Keys []KeyPolicy `hcl:"key,expand"`
+	}
+
+	expected := Policy{
+		Keys: []KeyPolicy{
+			KeyPolicy{
+				Prefix: "",
+				Policy: "read",
+			},
+			KeyPolicy{
+				Prefix: "foo/",
+				Policy: "write",
+			},
+			KeyPolicy{
+				Prefix: "foo/bar/",
+				Policy: "read",
+			},
+			KeyPolicy{
+				Prefix: "foo/bar/baz",
+				Policy: "deny",
+			},
+		},
+	}
+
+	files := []string{
+		"decode_policy.hcl",
+		"decode_policy.json",
+	}
+
+	for _, f := range files {
+		var actual Policy
+
+		err := Decode(&actual, testReadFile(t, f))
+		if err != nil {
+			t.Fatalf("Input: %s\n\nerr: %s", f, err)
+		}
+
+		if !reflect.DeepEqual(actual, expected) {
+			t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
+		}
+	}
+}
+
+func TestDecode_sliceExpand(t *testing.T) {
+	type testInner struct {
+		Name string `hcl:",key"`
+		Key  string
+	}
+
+	type testStruct struct {
+		Services []testInner `hcl:"service,expand"`
+	}
+
+	expected := testStruct{
+		Services: []testInner{
+			testInner{
+				Name: "my-service-0",
+				Key:  "value",
+			},
+			testInner{
+				Name: "my-service-1",
+				Key:  "value",
+			},
+		},
+	}
+
+	files := []string{
+		"slice_expand.hcl",
+	}
+
+	for _, f := range files {
+		t.Logf("Testing: %s", f)
+
+		var actual testStruct
+		err := Decode(&actual, testReadFile(t, f))
+		if err != nil {
+			t.Fatalf("Input: %s\n\nerr: %s", f, err)
+		}
+
+		if !reflect.DeepEqual(actual, expected) {
+			t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
+		}
+	}
+}
+
+func TestDecode_structureMap(t *testing.T) {
+	// This test is extracted from a failure in Terraform (terraform.io),
+	// hence the interesting structure naming.
+
+	type hclVariable struct {
+		Default     interface{}
+		Description string
+		Fields      []string `hcl:",decodedFields"`
+	}
+
+	type rawConfig struct {
+		Variable map[string]hclVariable
+	}
+
+	expected := rawConfig{
+		Variable: map[string]hclVariable{
+			"foo": hclVariable{
+				Default:     "bar",
+				Description: "bar",
+				Fields:      []string{"Default", "Description"},
+			},
+
+			"amis": hclVariable{
+				Default: []map[string]interface{}{
+					map[string]interface{}{
+						"east": "foo",
+					},
+				},
+				Fields: []string{"Default"},
+			},
+		},
+	}
+
+	files := []string{
+		"decode_tf_variable.hcl",
+		"decode_tf_variable.json",
+	}
+
+	for _, f := range files {
+		t.Logf("Testing: %s", f)
+
+		var actual rawConfig
+		err := Decode(&actual, testReadFile(t, f))
+		if err != nil {
+			t.Fatalf("Input: %s\n\nerr: %s", f, err)
+		}
+
+		if !reflect.DeepEqual(actual, expected) {
+			t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
+		}
+	}
+}
+
+func TestDecode_structureMapInvalid(t *testing.T) {
+	// Terraform GH-8295
+
+	type hclVariable struct {
+		Default     interface{}
+		Description string
+		Fields      []string `hcl:",decodedFields"`
+	}
+
+	type rawConfig struct {
+		Variable map[string]*hclVariable
+	}
+
+	var actual rawConfig
+	err := Decode(&actual, testReadFile(t, "terraform_variable_invalid.json"))
+	if err == nil {
+		t.Fatal("expected error")
+	}
+}
+
+func TestDecode_interfaceNonPointer(t *testing.T) {
+	var value interface{}
+	err := Decode(value, testReadFile(t, "basic_int_string.hcl"))
+	if err == nil {
+		t.Fatal("should error")
+	}
+}
+
+func TestDecode_boolString(t *testing.T) {
+	var value struct {
+		Boolean bool
+	}
+
+	err := Decode(&value, testReadFile(t, "basic_bool_string.hcl"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if value.Boolean != true {
+		t.Fatalf("bad: %#v", value.Boolean)
+	}
+}
+
+func TestDecode_boolInt(t *testing.T) {
+	var value struct {
+		Boolean bool
+	}
+
+	err := Decode(&value, testReadFile(t, "basic_bool_int.hcl"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if value.Boolean != true {
+		t.Fatalf("bad: %#v", value.Boolean)
+	}
+}
+
+func TestDecode_bool(t *testing.T) {
+	var value struct {
+		Boolean bool
+	}
+
+	err := Decode(&value, testReadFile(t, "basic_bool.hcl"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if value.Boolean != true {
+		t.Fatalf("bad: %#v", value.Boolean)
+	}
+}
+
+func TestDecode_intString(t *testing.T) {
+	var value struct {
+		Count int
+	}
+
+	err := Decode(&value, testReadFile(t, "basic_int_string.hcl"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if value.Count != 3 {
+		t.Fatalf("bad: %#v", value.Count)
+	}
+}
+
+func TestDecode_float32(t *testing.T) {
+	var value struct {
+		A float32 `hcl:"a"`
+		B float32 `hcl:"b"`
+	}
+
+	err := Decode(&value, testReadFile(t, "float.hcl"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if got, want := value.A, float32(1.02); got != want {
+		t.Fatalf("wrong result %#v; want %#v", got, want)
+	}
+	if got, want := value.B, float32(2); got != want {
+		t.Fatalf("wrong result %#v; want %#v", got, want)
+	}
+}
+
+func TestDecode_float64(t *testing.T) {
+	var value struct {
+		A float64 `hcl:"a"`
+		B float64 `hcl:"b"`
+	}
+
+	err := Decode(&value, testReadFile(t, "float.hcl"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if got, want := value.A, float64(1.02); got != want {
+		t.Fatalf("wrong result %#v; want %#v", got, want)
+	}
+	if got, want := value.B, float64(2); got != want {
+		t.Fatalf("wrong result %#v; want %#v", got, want)
+	}
+}
+
+func TestDecode_intStringAliased(t *testing.T) {
+	var value struct {
+		Count time.Duration
+	}
+
+	err := Decode(&value, testReadFile(t, "basic_int_string.hcl"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if value.Count != time.Duration(3) {
+		t.Fatalf("bad: %#v", value.Count)
+	}
+}
+
+func TestDecode_Node(t *testing.T) {
+	// given
+	var value struct {
+		Content ast.Node
+		Nested  struct {
+			Content ast.Node
+		}
+	}
+
+	content := `
+content {
+	hello = "world"
+}
+`
+
+	// when
+	err := Decode(&value, content)
+
+	// then
+	if err != nil {
+		t.Errorf("unable to decode content, %v", err)
+		return
+	}
+
+	// verify ast.Node can be decoded later
+	var v map[string]interface{}
+	err = DecodeObject(&v, value.Content)
+	if err != nil {
+		t.Errorf("unable to decode content, %v", err)
+		return
+	}
+
+	if v["hello"] != "world" {
+		t.Errorf("expected mapping to be returned")
+	}
+}
+
+func TestDecode_NestedNode(t *testing.T) {
+	// given
+	var value struct {
+		Nested struct {
+			Content ast.Node
+		}
+	}
+
+	content := `
+nested "content" {
+	hello = "world"
+}
+`
+
+	// when
+	err := Decode(&value, content)
+
+	// then
+	if err != nil {
+		t.Errorf("unable to decode content, %v", err)
+		return
+	}
+
+	// verify ast.Node can be decoded later
+	var v map[string]interface{}
+	err = DecodeObject(&v, value.Nested.Content)
+	if err != nil {
+		t.Errorf("unable to decode content, %v", err)
+		return
+	}
+
+	if v["hello"] != "world" {
+		t.Errorf("expected mapping to be returned")
+	}
+}
+
+// https://github.com/hashicorp/hcl/issues/60
+func TestDecode_topLevelKeys(t *testing.T) {
+	type Template struct {
+		Source string
+	}
+
+	templates := struct {
+		Templates []*Template `hcl:"template"`
+	}{}
+
+	err := Decode(&templates, `
+	template {
+	    source = "blah"
+	}
+
+	template {
+	    source = "blahblah"
+	}`)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if templates.Templates[0].Source != "blah" {
+		t.Errorf("bad source: %s", templates.Templates[0].Source)
+	}
+
+	if templates.Templates[1].Source != "blahblah" {
+		t.Errorf("bad source: %s", templates.Templates[1].Source)
+	}
+}
+
+func TestDecode_flattenedJSON(t *testing.T) {
+	// make sure we can also correctly extract a Name key too
+	type V struct {
+		Name        string `hcl:",key"`
+		Description string
+		Default     map[string]string
+	}
+	type Vars struct {
+		Variable []*V
+	}
+
+	cases := []struct {
+		JSON     string
+		Out      interface{}
+		Expected interface{}
+	}{
+		{ // Nested object, no sibling keys
+			JSON: `
+{
+  "var_name": {
+    "default": {
+      "key1": "a",
+      "key2": "b"
+    }
+  }
+}
+			`,
+			Out: &[]*V{},
+			Expected: &[]*V{
+				&V{
+					Name:    "var_name",
+					Default: map[string]string{"key1": "a", "key2": "b"},
+				},
+			},
+		},
+
+		{ // Nested object with a sibling key (this worked previously)
+			JSON: `
+{
+  "var_name": {
+    "description": "Described",
+    "default": {
+      "key1": "a",
+      "key2": "b"
+    }
+  }
+}
+			`,
+			Out: &[]*V{},
+			Expected: &[]*V{
+				&V{
+					Name:        "var_name",
+					Description: "Described",
+					Default:     map[string]string{"key1": "a", "key2": "b"},
+				},
+			},
+		},
+
+		{ // Multiple nested objects, one with a sibling key
+			JSON: `
+{
+  "variable": {
+    "var_1": {
+      "default": {
+        "key1": "a",
+        "key2": "b"
+      }
+    },
+    "var_2": {
+      "description": "Described",
+      "default": {
+        "key1": "a",
+        "key2": "b"
+      }
+    }
+  }
+}
+			`,
+			Out: &Vars{},
+			Expected: &Vars{
+				Variable: []*V{
+					&V{
+						Name:    "var_1",
+						Default: map[string]string{"key1": "a", "key2": "b"},
+					},
+					&V{
+						Name:        "var_2",
+						Description: "Described",
+						Default:     map[string]string{"key1": "a", "key2": "b"},
+					},
+				},
+			},
+		},
+
+		{ // Nested object to maps
+			JSON: `
+{
+  "variable": {
+    "var_name": {
+      "description": "Described",
+      "default": {
+        "key1": "a",
+        "key2": "b"
+      }
+    }
+  }
+}
+			`,
+			Out: &[]map[string]interface{}{},
+			Expected: &[]map[string]interface{}{
+				{
+					"variable": []map[string]interface{}{
+						{
+							"var_name": []map[string]interface{}{
+								{
+									"description": "Described",
+									"default": []map[string]interface{}{
+										{
+											"key1": "a",
+											"key2": "b",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+
+		{ // Nested object to maps without a sibling key should decode the same as above
+			JSON: `
+{
+  "variable": {
+    "var_name": {
+      "default": {
+        "key1": "a",
+        "key2": "b"
+      }
+    }
+  }
+}
+			`,
+			Out: &[]map[string]interface{}{},
+			Expected: &[]map[string]interface{}{
+				{
+					"variable": []map[string]interface{}{
+						{
+							"var_name": []map[string]interface{}{
+								{
+									"default": []map[string]interface{}{
+										{
+											"key1": "a",
+											"key2": "b",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+
+		{ // Nested objects, one with a sibling key, and one without
+			JSON: `
+{
+  "variable": {
+    "var_1": {
+      "default": {
+        "key1": "a",
+        "key2": "b"
+      }
+    },
+    "var_2": {
+      "description": "Described",
+      "default": {
+        "key1": "a",
+        "key2": "b"
+      }
+    }
+  }
+}
+			`,
+			Out: &[]map[string]interface{}{},
+			Expected: &[]map[string]interface{}{
+				{
+					"variable": []map[string]interface{}{
+						{
+							"var_1": []map[string]interface{}{
+								{
+									"default": []map[string]interface{}{
+										{
+											"key1": "a",
+											"key2": "b",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+				{
+					"variable": []map[string]interface{}{
+						{
+							"var_2": []map[string]interface{}{
+								{
+									"description": "Described",
+									"default": []map[string]interface{}{
+										{
+											"key1": "a",
+											"key2": "b",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for i, tc := range cases {
+		err := Decode(tc.Out, tc.JSON)
+		if err != nil {
+			t.Fatalf("[%d] err: %s", i, err)
+		}
+
+		if !reflect.DeepEqual(tc.Out, tc.Expected) {
+			t.Fatalf("[%d]\ngot: %s\nexpected: %s\n", i, spew.Sdump(tc.Out), spew.Sdump(tc.Expected))
+		}
+	}
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..4debbbe
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,3 @@
+module github.com/hashicorp/hcl
+
+require github.com/davecgh/go-spew v1.1.1
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..b5e2922
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,2 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
diff --git a/google_init_test.go b/google_init_test.go
new file mode 100644
index 0000000..e7a2029
--- /dev/null
+++ b/google_init_test.go
@@ -0,0 +1,16 @@
+// This file contains google3 specific code to make tests work with blaze.
+
+package hcl
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+func init() {
+	dir := filepath.Join(os.Getenv("TEST_SRCDIR"), "google3/third_party/golang/hashicorp/hcl")
+	if err := os.Chdir(dir); err != nil {
+		panic(fmt.Sprintf("os.Chdir(%q): %v", dir, err))
+	}
+}
diff --git a/hcl.go b/hcl.go
new file mode 100644
index 0000000..575a20b
--- /dev/null
+++ b/hcl.go
@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// HCL input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
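+//
+// A minimal decoding sketch (the struct field and input below are
+// illustrative assumptions, not part of this package's API):
+//
+//	var config struct {
+//		Foo string `hcl:"foo"`
+//	}
+//	if err := Decode(&config, `foo = "bar"`); err != nil {
+//		// handle the parse/decode error
+//	}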
+package hcl
diff --git a/hcl/ast/ast.go b/hcl/ast/ast.go
new file mode 100644
index 0000000..8b0ccee
--- /dev/null
+++ b/hcl/ast/ast.go
@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language).
+package ast
+
+import (
+	"fmt"
+	"strings"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+	node()
+	Pos() token.Pos
+}
+
+func (File) node()         {}
+func (ObjectList) node()   {}
+func (ObjectKey) node()    {}
+func (ObjectItem) node()   {}
+func (Comment) node()      {}
+func (CommentGroup) node() {}
+func (ObjectType) node()   {}
+func (LiteralType) node()  {}
+func (ListType) node()     {}
+
+// File represents a single HCL file
+type File struct {
+	Node     Node            // usually a *ObjectList
+	Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+	return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+	Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+	o.Items = append(o.Items, item)
+}
+
+// Filter filters out the objects with the given key list as a prefix.
+//
+// The returned list of objects contains ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
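+//
+// For example (a hypothetical sketch), given items keyed
+// ("variable", "foo") and ("variable", "bar"), Filter("variable") returns
+// both items keyed ("foo") and ("bar"), while Filter("variable", "foo")
+// returns a single item with zero keys.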
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		// If there aren't enough keys, then ignore this
+		if len(item.Keys) < len(keys) {
+			continue
+		}
+
+		match := true
+		for i, key := range item.Keys[:len(keys)] {
+			key := key.Token.Value().(string)
+			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+				match = false
+				break
+			}
+		}
+		if !match {
+			continue
+		}
+
+		// Strip off the prefix from the children
+		newItem := *item
+		newItem.Keys = newItem.Keys[len(keys):]
+		result.Add(&newItem)
+	}
+
+	return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) > 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) == 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+	// returns the position of the first item (panics if the list is empty)
+	return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL object item. An item is represented with a
+// key (or keys). It can be an assignment or an object (both normal and
+// nested).
+type ObjectItem struct {
+	// Keys has length one if the item is an assignment. For a nested
+	// object it can be longer than one; in that case Assign is invalid,
+	// as there is no assignment for a nested object.
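+	//
+	// For example, the item `nested "content" { ... }` from the decoder
+	// tests has two keys, "nested" and "content".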
+	Keys []*ObjectKey
+
+	// assign contains the position of "=", if any
+	Assign token.Pos
+
+	// Val is the item itself. It can be an object, list, number, bool, or
+	// string. If the key length is larger than one, Val can only be of
+	// type Object.
+	Val Node
+
+	LeadComment *CommentGroup // associated lead comment
+	LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+	// I'm not entirely sure what causes this, but removing this guard
+	// causes a test failure. We should investigate at some point.
+	if len(o.Keys) == 0 {
+		return token.Pos{}
+	}
+
+	return o.Keys[0].Pos()
+}
+
+// ObjectKey is either an identifier or a string.
+type ObjectKey struct {
+	Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+	return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+	Token token.Token
+
+	// comment types, only used when in a list
+	LeadComment *CommentGroup
+	LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+	return l.Token.Pos
+}
+
+// ListType represents an HCL list type.
+type ListType struct {
+	Lbrack token.Pos // position of "["
+	Rbrack token.Pos // position of "]"
+	List   []Node    // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+	return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+	l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL object type.
+type ObjectType struct {
+	Lbrace token.Pos   // position of "{"
+	Rbrace token.Pos   // position of "}"
+	List   *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+	return o.Lbrace
+}
+
+// Comment node represents a single //-style, #-style, or /*-style comment.
+type Comment struct {
+	Start token.Pos // position of / or #
+	Text  string
+}
+
+func (c *Comment) Pos() token.Pos {
+	return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+	List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+	return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/hcl/ast/ast_test.go b/hcl/ast/ast_test.go
new file mode 100644
index 0000000..1d2db17
--- /dev/null
+++ b/hcl/ast/ast_test.go
@@ -0,0 +1,200 @@
+package ast
+
+import (
+	"reflect"
+	"strings"
+	"testing"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+)
+
+func TestObjectListFilter(t *testing.T) {
+	var cases = []struct {
+		Filter []string
+		Input  []*ObjectItem
+		Output []*ObjectItem
+	}{
+		{
+			[]string{"foo"},
+			[]*ObjectItem{
+				&ObjectItem{
+					Keys: []*ObjectKey{
+						&ObjectKey{
+							Token: token.Token{Type: token.STRING, Text: `"foo"`},
+						},
+					},
+				},
+			},
+			[]*ObjectItem{
+				&ObjectItem{
+					Keys: []*ObjectKey{},
+				},
+			},
+		},
+
+		{
+			[]string{"foo"},
+			[]*ObjectItem{
+				&ObjectItem{
+					Keys: []*ObjectKey{
+						&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+						&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+					},
+				},
+				&ObjectItem{
+					Keys: []*ObjectKey{
+						&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+					},
+				},
+			},
+			[]*ObjectItem{
+				&ObjectItem{
+					Keys: []*ObjectKey{
+						&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+					},
+				},
+			},
+		},
+	}
+
+	for _, tc := range cases {
+		input := &ObjectList{Items: tc.Input}
+		expected := &ObjectList{Items: tc.Output}
+		if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) {
+			t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual)
+		}
+	}
+}
+
+func TestWalk(t *testing.T) {
+	items := []*ObjectItem{
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+			},
+			Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}},
+		},
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+			},
+		},
+	}
+
+	node := &ObjectList{Items: items}
+
+	order := []string{
+		"*ast.ObjectList",
+		"*ast.ObjectItem",
+		"*ast.ObjectKey",
+		"*ast.ObjectKey",
+		"*ast.LiteralType",
+		"*ast.ObjectItem",
+		"*ast.ObjectKey",
+	}
+	count := 0
+
+	Walk(node, func(n Node) (Node, bool) {
+		if n == nil {
+			return n, false
+		}
+
+		typeName := reflect.TypeOf(n).String()
+		if order[count] != typeName {
+			t.Errorf("expected '%s' got: '%s'", order[count], typeName)
+		}
+		count++
+		return n, true
+	})
+}
+
+func TestWalkEquality(t *testing.T) {
+	items := []*ObjectItem{
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+			},
+		},
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+			},
+		},
+	}
+
+	node := &ObjectList{Items: items}
+
+	rewritten := Walk(node, func(n Node) (Node, bool) { return n, true })
+
+	newNode, ok := rewritten.(*ObjectList)
+	if !ok {
+		t.Fatalf("expected ObjectList, got %T", rewritten)
+	}
+
+	if !reflect.DeepEqual(node, newNode) {
+		t.Fatal("rewritten node is not equal to the given node")
+	}
+
+	if len(newNode.Items) != 2 {
+		t.Errorf("expected newNode length 2, got: %d", len(newNode.Items))
+	}
+
+	expected := []string{
+		`"foo"`,
+		`"bar"`,
+	}
+
+	for i, item := range newNode.Items {
+		if len(item.Keys) != 1 {
+			t.Errorf("expected keys newNode length 1, got: %d", len(item.Keys))
+		}
+
+		if item.Keys[0].Token.Text != expected[i] {
+			t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text)
+		}
+
+		if item.Val != nil {
+			t.Errorf("expected item value should be nil")
+		}
+	}
+}
+
+func TestWalkRewrite(t *testing.T) {
+	items := []*ObjectItem{
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+			},
+		},
+		&ObjectItem{
+			Keys: []*ObjectKey{
+				&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+			},
+		},
+	}
+
+	node := &ObjectList{Items: items}
+
+	suffix := "_example"
+	node = Walk(node, func(n Node) (Node, bool) {
+		switch i := n.(type) {
+		case *ObjectKey:
+			i.Token.Text = i.Token.Text + suffix
+			n = i
+		}
+		return n, true
+	}).(*ObjectList)
+
+	Walk(node, func(n Node) (Node, bool) {
+		switch i := n.(type) {
+		case *ObjectKey:
+			if !strings.HasSuffix(i.Token.Text, suffix) {
+				t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix)
+			}
+		}
+		return n, true
+	})
+
+}
diff --git a/hcl/ast/walk.go b/hcl/ast/walk.go
new file mode 100644
index 0000000..ba07ad4
--- /dev/null
+++ b/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the returned
+// bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// node returned by fn can be used to rewrite the node that was passed in.
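+//
+// A minimal rewrite sketch (the suffix is an illustrative assumption,
+// mirroring TestWalkRewrite): append a suffix to every object key.
+//
+//	rewritten := Walk(list, func(n Node) (Node, bool) {
+//		if key, ok := n.(*ObjectKey); ok {
+//			key.Token.Text += "_example"
+//		}
+//		return n, true // the fn(nil) call is handled by the ok check above
+//	})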
+func Walk(node Node, fn WalkFunc) Node {
+	rewritten, ok := fn(node)
+	if !ok {
+		return rewritten
+	}
+
+	switch n := node.(type) {
+	case *File:
+		n.Node = Walk(n.Node, fn)
+	case *ObjectList:
+		for i, item := range n.Items {
+			n.Items[i] = Walk(item, fn).(*ObjectItem)
+		}
+	case *ObjectKey:
+		// nothing to do
+	case *ObjectItem:
+		for i, k := range n.Keys {
+			n.Keys[i] = Walk(k, fn).(*ObjectKey)
+		}
+
+		if n.Val != nil {
+			n.Val = Walk(n.Val, fn)
+		}
+	case *LiteralType:
+		// nothing to do
+	case *ListType:
+		for i, l := range n.List {
+			n.List[i] = Walk(l, fn)
+		}
+	case *ObjectType:
+		n.List = Walk(n.List, fn).(*ObjectList)
+	default:
+		// should we panic here?
+		fmt.Printf("unknown type: %T\n", n)
+	}
+
+	fn(nil)
+	return rewritten
+}
diff --git a/hcl/fmtcmd/fmtcmd.go b/hcl/fmtcmd/fmtcmd.go
new file mode 100644
index 0000000..9a0634c
--- /dev/null
+++ b/hcl/fmtcmd/fmtcmd.go
@@ -0,0 +1,162 @@
+// Derivative work from:
+//	- https://golang.org/src/cmd/gofmt/gofmt.go
+//	- https://github.com/fatih/hclfmt
+
+package fmtcmd
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/printer/printer"
+)
+
+var (
+	ErrWriteStdin = errors.New("cannot use write option with standard input")
+)
+
+type Options struct {
+	List  bool // list files whose formatting differs
+	Write bool // write result to (source) file instead of stdout
+	Diff  bool // display diffs of formatting changes
+}
+
+func isValidFile(f os.FileInfo, extensions []string) bool {
+	if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") {
+		for _, ext := range extensions {
+			if strings.HasSuffix(f.Name(), "."+ext) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// If in == nil, the source is the contents of the file with the given filename.
+func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error {
+	if in == nil {
+		f, err := os.Open(filename)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		in = f
+	}
+
+	src, err := ioutil.ReadAll(in)
+	if err != nil {
+		return err
+	}
+
+	res, err := printer.Format(src)
+	if err != nil {
+		return fmt.Errorf("In %s: %s", filename, err)
+	}
+
+	if !bytes.Equal(src, res) {
+		// formatting has changed
+		if opts.List {
+			fmt.Fprintln(out, filename)
+		}
+		if opts.Write {
+			err = ioutil.WriteFile(filename, res, 0644)
+			if err != nil {
+				return err
+			}
+		}
+		if opts.Diff {
+			data, err := diff(src, res)
+			if err != nil {
+				return fmt.Errorf("computing diff: %s", err)
+			}
+			fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename)
+			out.Write(data)
+		}
+	}
+
+	if !opts.List && !opts.Write && !opts.Diff {
+		_, err = out.Write(res)
+	}
+
+	return err
+}
+
+func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error {
+	visitFile := func(path string, f os.FileInfo, err error) error {
+		if err == nil && isValidFile(f, extensions) {
+			err = processFile(path, nil, stdout, false, opts)
+		}
+		return err
+	}
+
+	return filepath.Walk(path, visitFile)
+}
+
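+// Run formats the HCL files with the given extensions under each of the
+// given paths according to opts, reading from stdin instead when paths is
+// empty. A hypothetical invocation, rewriting one file in place:
+//
+//	err := Run([]string{"main.hcl"}, []string{"hcl"}, nil, os.Stdout,
+//		Options{Write: true})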
+func Run(
+	paths, extensions []string,
+	stdin io.Reader,
+	stdout io.Writer,
+	opts Options,
+) error {
+	if len(paths) == 0 {
+		if opts.Write {
+			return ErrWriteStdin
+		}
+		if err := processFile("<standard input>", stdin, stdout, true, opts); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	for _, path := range paths {
+		switch dir, err := os.Stat(path); {
+		case err != nil:
+			return err
+		case dir.IsDir():
+			if err := walkDir(path, extensions, stdout, opts); err != nil {
+				return err
+			}
+		default:
+			if err := processFile(path, nil, stdout, false, opts); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
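+// diff writes b1 and b2 to temporary files and shells out to the system
+// "diff -u" tool, returning its unified-diff output.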
+func diff(b1, b2 []byte) (data []byte, err error) {
+	f1, err := ioutil.TempFile("", "")
+	if err != nil {
+		return
+	}
+	defer os.Remove(f1.Name())
+	defer f1.Close()
+
+	f2, err := ioutil.TempFile("", "")
+	if err != nil {
+		return
+	}
+	defer os.Remove(f2.Name())
+	defer f2.Close()
+
+	f1.Write(b1)
+	f2.Write(b2)
+
+	data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
+	if len(data) > 0 {
+		// diff exits with a non-zero status when the files don't match.
+		// Ignore that failure as long as we get output.
+		err = nil
+	}
+	return
+}
diff --git a/hcl/fmtcmd/fmtcmd_test.go b/hcl/fmtcmd/fmtcmd_test.go
new file mode 100644
index 0000000..4a152ad
--- /dev/null
+++ b/hcl/fmtcmd/fmtcmd_test.go
@@ -0,0 +1,440 @@
+// +build !windows
+// TODO(jen20): These need fixing on Windows but fmt is not used right now
+// and red CI is making it harder to process other bugs, so ignore until
+// we get around to fixing them.
+
+package fmtcmd
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"sort"
+	"syscall"
+	"testing"
+
+	"google3/third_party/golang/hashicorp/hcl/testhelper/testhelper"
+)
+
+var fixtureExtensions = []string{"hcl"}
+
+func init() {
+	sort.Sort(ByFilename(fixtures))
+}
+
+func TestIsValidFile(t *testing.T) {
+	const fixtureDir = "./test-fixtures"
+
+	cases := []struct {
+		Path     string
+		Expected bool
+	}{
+		{"good.hcl", true},
+		{".hidden.ignore", false},
+		{"file.ignore", false},
+		{"dir.ignore", false},
+	}
+
+	for _, tc := range cases {
+		file, err := os.Stat(filepath.Join(fixtureDir, tc.Path))
+		if err != nil {
+			t.Errorf("unexpected error: %s", err)
+		}
+
+		if res := isValidFile(file, fixtureExtensions); res != tc.Expected {
+			t.Errorf("want: %t, got: %t", tc.Expected, res)
+		}
+	}
+}
+
+func TestRunMultiplePaths(t *testing.T) {
+	path1, err := renderFixtures("")
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	defer os.RemoveAll(path1)
+	path2, err := renderFixtures("")
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	defer os.RemoveAll(path2)
+
+	var expectedOut bytes.Buffer
+	for _, path := range []string{path1, path2} {
+		for _, fixture := range fixtures {
+			if !bytes.Equal(fixture.golden, fixture.input) {
+				expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
+			}
+		}
+	}
+
+	_, stdout := mockIO()
+	err = Run(
+		[]string{path1, path2},
+		fixtureExtensions,
+		nil, stdout,
+		Options{
+			List: true,
+		},
+	)
+
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if stdout.String() != expectedOut.String() {
+		t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String())
+	}
+}
+
+func TestRunSubDirectories(t *testing.T) {
+	pathParent, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	defer os.RemoveAll(pathParent)
+
+	path1, err := renderFixtures(pathParent)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	path2, err := renderFixtures(pathParent)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+
+	paths := []string{path1, path2}
+	sort.Strings(paths)
+
+	var expectedOut bytes.Buffer
+	for _, path := range paths {
+		for _, fixture := range fixtures {
+			if !bytes.Equal(fixture.golden, fixture.input) {
+				expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
+			}
+		}
+	}
+
+	_, stdout := mockIO()
+	err = Run(
+		[]string{pathParent},
+		fixtureExtensions,
+		nil, stdout,
+		Options{
+			List: true,
+		},
+	)
+
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if stdout.String() != expectedOut.String() {
+		t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String())
+	}
+}
+
+func TestRunStdin(t *testing.T) {
+	var expectedOut bytes.Buffer
+	for i, fixture := range fixtures {
+		if i != 0 {
+			expectedOut.WriteString("\n")
+		}
+		expectedOut.Write(fixture.golden)
+	}
+
+	stdin, stdout := mockIO()
+	for _, fixture := range fixtures {
+		stdin.Write(fixture.input)
+	}
+
+	err := Run(
+		[]string{},
+		fixtureExtensions,
+		stdin, stdout,
+		Options{},
+	)
+
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if !bytes.Equal(stdout.Bytes(), expectedOut.Bytes()) {
+		t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String())
+	}
+}
+
+func TestRunStdinAndWrite(t *testing.T) {
+	var expectedOut = []byte{}
+
+	stdin, stdout := mockIO()
+	stdin.WriteString("")
+	err := Run(
+		[]string{}, []string{},
+		stdin, stdout,
+		Options{
+			Write: true,
+		},
+	)
+
+	if err != ErrWriteStdin {
+		t.Errorf("error want:\n%s\ngot:\n%s", ErrWriteStdin, err)
+	}
+	if !bytes.Equal(stdout.Bytes(), expectedOut) {
+		t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+	}
+}
+
+func TestRunFileError(t *testing.T) {
+	path, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	defer os.RemoveAll(path)
+	filename := filepath.Join(path, "unreadable.hcl")
+
+	var expectedError = &os.PathError{
+		Op:   "open",
+		Path: filename,
+		Err:  syscall.EACCES,
+	}
+
+	err = ioutil.WriteFile(filename, []byte{}, 0000)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+
+	_, stdout := mockIO()
+	err = Run(
+		[]string{path},
+		fixtureExtensions,
+		nil, stdout,
+		Options{},
+	)
+
+	if !reflect.DeepEqual(err, expectedError) {
+		t.Errorf("error want: %#v, got: %#v", expectedError, err)
+	}
+}
+
+func TestRunNoOptions(t *testing.T) {
+	path, err := renderFixtures("")
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	defer os.RemoveAll(path)
+
+	var expectedOut bytes.Buffer
+	for _, fixture := range fixtures {
+		expectedOut.Write(fixture.golden)
+	}
+
+	_, stdout := mockIO()
+	err = Run(
+		[]string{path},
+		fixtureExtensions,
+		nil, stdout,
+		Options{},
+	)
+
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if stdout.String() != expectedOut.String() {
+		t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String())
+	}
+}
+
+func TestRunList(t *testing.T) {
+	path, err := renderFixtures("")
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	defer os.RemoveAll(path)
+
+	var expectedOut bytes.Buffer
+	for _, fixture := range fixtures {
+		if !bytes.Equal(fixture.golden, fixture.input) {
+			expectedOut.WriteString(fmt.Sprintln(filepath.Join(path, fixture.filename)))
+		}
+	}
+
+	_, stdout := mockIO()
+	err = Run(
+		[]string{path},
+		fixtureExtensions,
+		nil, stdout,
+		Options{
+			List: true,
+		},
+	)
+
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if stdout.String() != expectedOut.String() {
+		t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String())
+	}
+}
+
+func TestRunWrite(t *testing.T) {
+	path, err := renderFixtures("")
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	defer os.RemoveAll(path)
+
+	_, stdout := mockIO()
+	err = Run(
+		[]string{path},
+		fixtureExtensions,
+		nil, stdout,
+		Options{
+			Write: true,
+		},
+	)
+
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	for _, fixture := range fixtures {
+		res, err := ioutil.ReadFile(filepath.Join(path, fixture.filename))
+		if err != nil {
+			t.Errorf("unexpected error: %s", err)
+		}
+		if !bytes.Equal(res, fixture.golden) {
+			t.Errorf("file %q contents want:\n%s\ngot:\n%s", fixture.filename, fixture.golden, res)
+		}
+	}
+}
+
+func TestRunDiff(t *testing.T) {
+	path, err := renderFixtures("")
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	defer os.RemoveAll(path)
+
+	var expectedOut bytes.Buffer
+	for _, fixture := range fixtures {
+		if len(fixture.diff) > 0 {
+			expectedOut.WriteString(
+				regexp.QuoteMeta(
+					fmt.Sprintf("diff a/%s/%s b/%s/%s\n", path, fixture.filename, path, fixture.filename),
+				),
+			)
+			// Need to use regex to ignore datetimes in diff.
+			expectedOut.WriteString(`--- .+?\n`)
+			expectedOut.WriteString(`\+\+\+ .+?\n`)
+			expectedOut.WriteString(regexp.QuoteMeta(string(fixture.diff)))
+		}
+	}
+
+	expectedOutString := testhelper.Unix2dos(expectedOut.String())
+
+	_, stdout := mockIO()
+	err = Run(
+		[]string{path},
+		fixtureExtensions,
+		nil, stdout,
+		Options{
+			Diff: true,
+		},
+	)
+
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if !regexp.MustCompile(expectedOutString).Match(stdout.Bytes()) {
+		t.Errorf("stdout want match:\n%s\ngot:\n%q", expectedOutString, stdout)
+	}
+}
+
+func mockIO() (stdin, stdout *bytes.Buffer) {
+	return new(bytes.Buffer), new(bytes.Buffer)
+}
+
+type fixture struct {
+	filename            string
+	input, golden, diff []byte
+}
+
+type ByFilename []fixture
+
+func (s ByFilename) Len() int           { return len(s) }
+func (s ByFilename) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s ByFilename) Less(i, j int) bool { return len(s[i].filename) > len(s[j].filename) }
+
+var fixtures = []fixture{
+	{
+		"noop.hcl",
+		[]byte(`resource "aws_security_group" "firewall" {
+  count = 5
+}
+`),
+		[]byte(`resource "aws_security_group" "firewall" {
+  count = 5
+}
+`),
+		[]byte(``),
+	}, {
+		"align_equals.hcl",
+		[]byte(`variable "foo" {
+  default = "bar"
+  description = "bar"
+}
+`),
+		[]byte(`variable "foo" {
+  default     = "bar"
+  description = "bar"
+}
+`),
+		[]byte(`@@ -1,4 +1,4 @@
+ variable "foo" {
+-  default = "bar"
++  default     = "bar"
+   description = "bar"
+ }
+`),
+	}, {
+		"indentation.hcl",
+		[]byte(`provider "aws" {
+    access_key = "foo"
+    secret_key = "bar"
+}
+`),
+		[]byte(`provider "aws" {
+  access_key = "foo"
+  secret_key = "bar"
+}
+`),
+		[]byte(`@@ -1,4 +1,4 @@
+ provider "aws" {
+-    access_key = "foo"
+-    secret_key = "bar"
++  access_key = "foo"
++  secret_key = "bar"
+ }
+`),
+	},
+}
+
+// parent can be an empty string, in which case the system's default
+// temporary directory will be used.
+func renderFixtures(parent string) (path string, err error) {
+	path, err = ioutil.TempDir(parent, "")
+	if err != nil {
+		return "", err
+	}
+
+	for _, fixture := range fixtures {
+		err = ioutil.WriteFile(filepath.Join(path, fixture.filename), []byte(fixture.input), 0644)
+		if err != nil {
+			os.RemoveAll(path)
+			return "", err
+		}
+	}
+
+	return path, nil
+}
diff --git a/hcl/fmtcmd/google_init_test.go b/hcl/fmtcmd/google_init_test.go
new file mode 100644
index 0000000..92a4daf
--- /dev/null
+++ b/hcl/fmtcmd/google_init_test.go
@@ -0,0 +1,16 @@
+// This file contains google3 specific code to make tests work with blaze.
+
+package fmtcmd
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+func init() {
+	dir := filepath.Join(os.Getenv("TEST_SRCDIR"), "google3/third_party/golang/hashicorp/hcl/hcl/fmtcmd")
+	if err := os.Chdir(dir); err != nil {
+		panic(fmt.Sprintf("os.Chdir(%q): %v", dir, err))
+	}
+}
diff --git a/hcl/fmtcmd/test-fixtures/.hidden.ignore b/hcl/fmtcmd/test-fixtures/.hidden.ignore
new file mode 100644
index 0000000..9977a28
--- /dev/null
+++ b/hcl/fmtcmd/test-fixtures/.hidden.ignore
@@ -0,0 +1 @@
+invalid
diff --git a/hcl/fmtcmd/test-fixtures/dir.ignore b/hcl/fmtcmd/test-fixtures/dir.ignore
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/hcl/fmtcmd/test-fixtures/dir.ignore
diff --git a/hcl/fmtcmd/test-fixtures/file.ignore b/hcl/fmtcmd/test-fixtures/file.ignore
new file mode 100644
index 0000000..9977a28
--- /dev/null
+++ b/hcl/fmtcmd/test-fixtures/file.ignore
@@ -0,0 +1 @@
+invalid
diff --git a/hcl/fmtcmd/test-fixtures/good.hcl b/hcl/fmtcmd/test-fixtures/good.hcl
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/hcl/fmtcmd/test-fixtures/good.hcl
diff --git a/hcl/parser/error.go b/hcl/parser/error.go
new file mode 100644
index 0000000..d6c53c9
--- /dev/null
+++ b/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+	"fmt"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+	Pos token.Pos
+	Err error
+}
+
+func (e *PosError) Error() string {
+	return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
diff --git a/hcl/parser/error_test.go b/hcl/parser/error_test.go
new file mode 100644
index 0000000..32399fe
--- /dev/null
+++ b/hcl/parser/error_test.go
@@ -0,0 +1,9 @@
+package parser
+
+import (
+	"testing"
+)
+
+func TestPosError_impl(t *testing.T) {
+	var _ error = new(PosError)
+}
diff --git a/hcl/parser/google_init_test.go b/hcl/parser/google_init_test.go
new file mode 100644
index 0000000..ffafe80
--- /dev/null
+++ b/hcl/parser/google_init_test.go
@@ -0,0 +1,16 @@
+// This file contains google3 specific code to make tests work with blaze.
+
+package parser
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+func init() {
+	dir := filepath.Join(os.Getenv("TEST_SRCDIR"), "google3/third_party/golang/hashicorp/hcl/hcl/parser")
+	if err := os.Chdir(dir); err != nil {
+		panic(fmt.Sprintf("os.Chdir(%q): %v", dir, err))
+	}
+}
diff --git a/hcl/parser/parser.go b/hcl/parser/parser.go
new file mode 100644
index 0000000..dbf608c
--- /dev/null
+++ b/hcl/parser/parser.go
@@ -0,0 +1,532 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
+package parser
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/ast/ast"
+	"google3/third_party/golang/hashicorp/hcl/hcl/scanner/scanner"
+	"google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+)
+
+type Parser struct {
+	sc *scanner.Scanner
+
+	// Last read token
+	tok       token.Token
+	commaPrev token.Token
+
+	comments    []*ast.CommentGroup
+	leadComment *ast.CommentGroup // last lead comment
+	lineComment *ast.CommentGroup // last line comment
+
+	enableTrace bool
+	indent      int
+	n           int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+	return &Parser{
+		sc: scanner.New(src),
+	}
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+	// Normalize all line endings: the scanner and output only work with
+	// "\n" line endings, so we would otherwise end up with dangling "\r"
+	// characters in the parsed data.
+	src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
+	p := newParser(src)
+	return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the scanned source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+	f := &ast.File{}
+	var err, scerr error
+	p.sc.Error = func(pos token.Pos, msg string) {
+		scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+	}
+
+	f.Node, err = p.objectList(false)
+	if scerr != nil {
+		return nil, scerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	f.Comments = p.comments
+	return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter "obj" tells us whether we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+	defer un(trace(p, "ParseObjectList"))
+	node := &ast.ObjectList{}
+
+	for {
+		if obj {
+			tok := p.scan()
+			p.unscan()
+			if tok.Type == token.RBRACE {
+				break
+			}
+		}
+
+		n, err := p.objectItem()
+		if err == errEofToken {
+			break // we are finished
+		}
+
+		// we don't return a nil node, because callers might want to use
+		// the already collected items.
+		if err != nil {
+			return node, err
+		}
+
+		node.Add(n)
+
+		// object lists can be optionally comma-delimited e.g. when a list of maps
+		// is being expressed, so a comma is allowed here - it's simply consumed
+		tok := p.scan()
+		if tok.Type != token.COMMA {
+			p.unscan()
+		}
+	}
+	return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+	endline = p.tok.Pos.Line
+
+	// count the endline if it's a multiline comment, i.e. starting with /*
+	if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+		// don't use range here - no need to decode Unicode code points
+		for i := 0; i < len(p.tok.Text); i++ {
+			if p.tok.Text[i] == '\n' {
+				endline++
+			}
+		}
+	}
+
+	comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+	p.tok = p.sc.Scan()
+	return
+}
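+// Editor's note: for a comment such as "/* a\nb */" that starts on line 3,
+// the returned endline is 4, one increment per '\n' inside the text.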
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+	var list []*ast.Comment
+	endline = p.tok.Pos.Line
+
+	for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+		var comment *ast.Comment
+		comment, endline = p.consumeComment()
+		list = append(list, comment)
+	}
+
+	// add comment group to the comments list
+	comments = &ast.CommentGroup{List: list}
+	p.comments = append(p.comments, comments)
+
+	return
+}
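+// Editor's sketch: with n == 1, comments on consecutive lines are merged
+// into a single group, while a gap of more than one line starts a new one:
+//
+//	# Multiple
+//	# Lines      <- one CommentGroup
+//
+//	# Another    <- starts a second group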
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+	defer un(trace(p, "ParseObjectItem"))
+
+	keys, err := p.objectKey()
+	if len(keys) > 0 && err == errEofToken {
+		// We ignore eof token here since it is an error if we didn't
+		// receive a value (but we did receive a key) for the item.
+		err = nil
+	}
+	if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+		// This is a strange boolean statement, but what it means is:
+		// We have keys with no value, and we're likely in an object
+		// (since RBrace ends an object). For this, we set err to nil so
+		// we continue and get the error below of having the wrong value
+		// type.
+		err = nil
+
+		// Reset the token type so we don't think it completed fine. See
+		// objectType which uses p.tok.Type to check if we're done with
+		// the object.
+		p.tok.Type = token.EOF
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	o := &ast.ObjectItem{
+		Keys: keys,
+	}
+
+	if p.leadComment != nil {
+		o.LeadComment = p.leadComment
+		p.leadComment = nil
+	}
+
+	switch p.tok.Type {
+	case token.ASSIGN:
+		o.Assign = p.tok.Pos
+		o.Val, err = p.object()
+		if err != nil {
+			return nil, err
+		}
+	case token.LBRACE:
+		o.Val, err = p.objectType()
+		if err != nil {
+			return nil, err
+		}
+	default:
+		keyStr := make([]string, 0, len(keys))
+		for _, k := range keys {
+			keyStr = append(keyStr, k.Token.Text)
+		}
+
+		return nil, &PosError{
+			Pos: p.tok.Pos,
+			Err: fmt.Errorf(
+				"key '%s' expected start of object ('{') or assignment ('=')",
+				strings.Join(keyStr, " ")),
+		}
+	}
+
+	// key=#comment
+	// val
+	if p.lineComment != nil {
+		o.LineComment, p.lineComment = p.lineComment, nil
+	}
+
+	// do a look-ahead for line comment
+	p.scan()
+	if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+		o.LineComment = p.lineComment
+		p.lineComment = nil
+	}
+	p.unscan()
+	return o, nil
+}
+
+// objectKey parses an object key and returns a ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+	keyCount := 0
+	keys := make([]*ast.ObjectKey, 0)
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.EOF:
+			// It is very important to also return the keys here as well as
+			// the error. This is because we need to be able to tell if we
+			// did parse keys prior to finding the EOF, or if we just found
+			// a bare EOF.
+			return keys, errEofToken
+		case token.ASSIGN:
+			// assignment or object only, but not nested objects. this is not
+			// allowed: `foo bar = {}`
+			if keyCount > 1 {
+				return nil, &PosError{
+					Pos: p.tok.Pos,
+					Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+				}
+			}
+
+			if keyCount == 0 {
+				return nil, &PosError{
+					Pos: p.tok.Pos,
+					Err: errors.New("no object keys found!"),
+				}
+			}
+
+			return keys, nil
+		case token.LBRACE:
+			var err error
+
+			// If we have no keys, then it is a syntax error. i.e. {{}} is not
+			// allowed.
+			if len(keys) == 0 {
+				err = &PosError{
+					Pos: p.tok.Pos,
+					Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+				}
+			}
+
+			// object
+			return keys, err
+		case token.IDENT, token.STRING:
+			keyCount++
+			keys = append(keys, &ast.ObjectKey{Token: p.tok})
+		case token.ILLEGAL:
+			return keys, &PosError{
+				Pos: p.tok.Pos,
+				Err: fmt.Errorf("illegal character"),
+			}
+		default:
+			return keys, &PosError{
+				Pos: p.tok.Pos,
+				Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+			}
+		}
+	}
+}
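+// Editor's sketch of accepted key shapes (mirroring TestObjectKey in
+// parser_test.go):
+//
+//	foo {}           -> [IDENT]
+//	foo = 123        -> [IDENT]        (ASSIGN ends a single-key list)
+//	foo bar "baz" {} -> [IDENT IDENT STRING]
+//	foo bar = {}     -> error: nested object expected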
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+	defer un(trace(p, "ParseType"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+		return p.literalType()
+	case token.LBRACE:
+		return p.objectType()
+	case token.LBRACK:
+		return p.listType()
+	case token.COMMENT:
+		// implement comment
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, &PosError{
+		Pos: tok.Pos,
+		Err: fmt.Errorf("Unknown token: %+v", tok),
+	}
+}
+
+// objectType parses an object type and returns a ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseObjectType"))
+
+	// we assume that the currently scanned token is a LBRACE
+	o := &ast.ObjectType{
+		Lbrace: p.tok.Pos,
+	}
+
+	l, err := p.objectList(true)
+
+	// if we hit RBRACE, we are good to go (it means we parsed all items); if
+	// it's not an RBRACE, it's a syntax error and we just return it.
+	if err != nil && p.tok.Type != token.RBRACE {
+		return nil, err
+	}
+
+	// No error, scan and expect the ending to be a brace
+	if tok := p.scan(); tok.Type != token.RBRACE {
+		return nil, &PosError{
+			Pos: tok.Pos,
+			Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+		}
+	}
+
+	o.List = l
+	o.Rbrace = p.tok.Pos // advanced via parseObjectList
+	return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+	defer un(trace(p, "ParseListType"))
+
+	// we assume that the currently scanned token is a LBRACK
+	l := &ast.ListType{
+		Lbrack: p.tok.Pos,
+	}
+
+	needComma := false
+	for {
+		tok := p.scan()
+		if needComma {
+			switch tok.Type {
+			case token.COMMA, token.RBRACK:
+			default:
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error parsing list, expected comma or list end, got: %s",
+						tok.Type),
+				}
+			}
+		}
+		switch tok.Type {
+		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+			node, err := p.literalType()
+			if err != nil {
+				return nil, err
+			}
+
+			// If there is a lead comment, apply it
+			if p.leadComment != nil {
+				node.LeadComment = p.leadComment
+				p.leadComment = nil
+			}
+
+			l.Add(node)
+			needComma = true
+		case token.COMMA:
+			// get next list item or we are at the end
+			// do a look-ahead for line comment
+			p.scan()
+			if p.lineComment != nil && len(l.List) > 0 {
+				lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+				if ok {
+					lit.LineComment = p.lineComment
+					l.List[len(l.List)-1] = lit
+					p.lineComment = nil
+				}
+			}
+			p.unscan()
+
+			needComma = false
+			continue
+		case token.LBRACE:
+			// Looks like a nested object, so parse it out
+			node, err := p.objectType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse object within list: %s", err),
+				}
+			}
+			l.Add(node)
+			needComma = true
+		case token.LBRACK:
+			node, err := p.listType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse list within list: %s", err),
+				}
+			}
+			l.Add(node)
+		case token.RBRACK:
+			// finished
+			l.Rbrack = p.tok.Pos
+			return l, nil
+		default:
+			return nil, &PosError{
+				Pos: tok.Pos,
+				Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+			}
+		}
+	}
+}
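+// Editor's note: needComma enforces a separator between elements, e.g.
+//
+//	foo = [1, 2, "foo",]  // ok, a trailing comma is allowed
+//	foo = [1 2]           // error: expected comma or list end, got: NUMBER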
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+	defer un(trace(p, "ParseLiteral"))
+
+	return &ast.LiteralType{
+		Token: p.tok,
+	}, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+	// If we have a token on the buffer, then return it.
+	if p.n != 0 {
+		p.n = 0
+		return p.tok
+	}
+
+	// Otherwise read the next token from the scanner and save it to the buffer
+	// in case we unscan later.
+	prev := p.tok
+	p.tok = p.sc.Scan()
+
+	if p.tok.Type == token.COMMENT {
+		var comment *ast.CommentGroup
+		var endline int
+
+		// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+		// p.tok.Pos.Line, prev.Pos.Line, endline)
+		if p.tok.Pos.Line == prev.Pos.Line {
+			// The comment is on same line as the previous token; it
+			// cannot be a lead comment but may be a line comment.
+			comment, endline = p.consumeCommentGroup(0)
+			if p.tok.Pos.Line != endline {
+				// The next token is on a different line, thus
+				// the last comment group is a line comment.
+				p.lineComment = comment
+			}
+		}
+
+		// consume successor comments, if any
+		endline = -1
+		for p.tok.Type == token.COMMENT {
+			comment, endline = p.consumeCommentGroup(1)
+		}
+
+		if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+			switch p.tok.Type {
+			case token.RBRACE, token.RBRACK:
+				// Do not count for these cases
+			default:
+				// The next token is following on the line immediately after the
+				// comment group, thus the last comment group is a lead comment.
+				p.leadComment = comment
+			}
+		}
+
+	}
+
+	return p.tok
+}
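+// Editor's sketch of the two roles scan assigns to comments:
+//
+//	foo = 1 # same line as the previous token: becomes the line comment
+//	# on the line right above the next item: becomes the lead comment
+//	bar = 2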
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+	p.n = 1
+}
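+// Editor's note: the buffer is one token deep, so a look-ahead is always a
+// scan/unscan pair:
+//
+//	tok := p.scan()
+//	p.unscan() // the very next scan returns tok again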
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+	p.indent--
+	p.printTrace(")")
+}
diff --git a/hcl/parser/parser_test.go b/hcl/parser/parser_test.go
new file mode 100644
index 0000000..2f5218e
--- /dev/null
+++ b/hcl/parser/parser_test.go
@@ -0,0 +1,575 @@
+package parser
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"strings"
+	"testing"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/ast/ast"
+	"google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+)
+
+func TestType(t *testing.T) {
+	var literals = []struct {
+		typ token.Type
+		src string
+	}{
+		{token.STRING, `foo = "foo"`},
+		{token.NUMBER, `foo = 123`},
+		{token.NUMBER, `foo = -29`},
+		{token.FLOAT, `foo = 123.12`},
+		{token.FLOAT, `foo = -123.12`},
+		{token.BOOL, `foo = true`},
+		{token.HEREDOC, "foo = <<EOF\nHello\nWorld\nEOF"},
+	}
+
+	for _, l := range literals {
+		p := newParser([]byte(l.src))
+		item, err := p.objectItem()
+		if err != nil {
+			t.Error(err)
+		}
+
+		lit, ok := item.Val.(*ast.LiteralType)
+		if !ok {
+			t.Errorf("node should be of type LiteralType, got: %T", item.Val)
+		}
+
+		if lit.Token.Type != l.typ {
+			t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type)
+		}
+	}
+}
+
+func TestListType(t *testing.T) {
+	var literals = []struct {
+		src    string
+		tokens []token.Type
+	}{
+		{
+			`foo = ["123", 123]`,
+			[]token.Type{token.STRING, token.NUMBER},
+		},
+		{
+			`foo = [123, "123",]`,
+			[]token.Type{token.NUMBER, token.STRING},
+		},
+		{
+			`foo = [false]`,
+			[]token.Type{token.BOOL},
+		},
+		{
+			`foo = []`,
+			[]token.Type{},
+		},
+		{
+			`foo = [1,
+"string",
+<<EOF
+heredoc contents
+EOF
+]`,
+			[]token.Type{token.NUMBER, token.STRING, token.HEREDOC},
+		},
+	}
+
+	for _, l := range literals {
+		p := newParser([]byte(l.src))
+		item, err := p.objectItem()
+		if err != nil {
+			t.Error(err)
+		}
+
+		list, ok := item.Val.(*ast.ListType)
+		if !ok {
+			t.Errorf("node should be of type ListType, got: %T", item.Val)
+		}
+
+		tokens := []token.Type{}
+		for _, li := range list.List {
+			if tp, ok := li.(*ast.LiteralType); ok {
+				tokens = append(tokens, tp.Token.Type)
+			}
+		}
+
+		equals(t, l.tokens, tokens)
+	}
+}
+
+func TestListOfMaps(t *testing.T) {
+	src := `foo = [
+    {key = "bar"},
+    {key = "baz", key2 = "qux"},
+  ]`
+	p := newParser([]byte(src))
+
+	file, err := p.Parse()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Here we make all sorts of assumptions about the input structure w/ type
+	// assertions. The intent is only for this to be a "smoke test" ensuring
+	// parsing actually performed its duty - giving this test something a bit
+	// more robust than _just_ "no error occurred".
+	expected := []string{`"bar"`, `"baz"`, `"qux"`}
+	actual := make([]string, 0, 3)
+	ol := file.Node.(*ast.ObjectList)
+	objItem := ol.Items[0]
+	list := objItem.Val.(*ast.ListType)
+	for _, node := range list.List {
+		obj := node.(*ast.ObjectType)
+		for _, item := range obj.List.Items {
+			val := item.Val.(*ast.LiteralType)
+			actual = append(actual, val.Token.Text)
+		}
+
+	}
+	if !reflect.DeepEqual(expected, actual) {
+		t.Fatalf("Expected: %#v, got %#v", expected, actual)
+	}
+}
+
+func TestListOfMaps_requiresComma(t *testing.T) {
+	src := `foo = [
+    {key = "bar"}
+    {key = "baz"}
+  ]`
+	p := newParser([]byte(src))
+
+	_, err := p.Parse()
+	if err == nil {
+		t.Fatalf("Expected error, got none!")
+	}
+
+	expected := "error parsing list, expected comma or list end"
+	if !strings.Contains(err.Error(), expected) {
+		t.Fatalf("Expected err:\n  %s\nTo contain:\n  %s\n", err, expected)
+	}
+}
+
+func TestListType_leadComment(t *testing.T) {
+	var literals = []struct {
+		src     string
+		comment []string
+	}{
+		{
+			`foo = [
+			1,
+			# bar
+			2,
+			3,
+			]`,
+			[]string{"", "# bar", ""},
+		},
+	}
+
+	for _, l := range literals {
+		p := newParser([]byte(l.src))
+		item, err := p.objectItem()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		list, ok := item.Val.(*ast.ListType)
+		if !ok {
+			t.Fatalf("node should be of type ListType, got: %T", item.Val)
+		}
+
+		if len(list.List) != len(l.comment) {
+			t.Fatalf("bad: %d", len(list.List))
+		}
+
+		for i, li := range list.List {
+			lt := li.(*ast.LiteralType)
+			comment := l.comment[i]
+
+			if (lt.LeadComment == nil) != (comment == "") {
+				t.Fatalf("bad: %#v", lt)
+			}
+
+			if comment == "" {
+				continue
+			}
+
+			actual := lt.LeadComment.List[0].Text
+			if actual != comment {
+				t.Fatalf("bad: %q %q", actual, comment)
+			}
+		}
+	}
+}
+
+func TestListType_lineComment(t *testing.T) {
+	var literals = []struct {
+		src     string
+		comment []string
+	}{
+		{
+			`foo = [
+			1,
+			2, # bar
+			3,
+			]`,
+			[]string{"", "# bar", ""},
+		},
+	}
+
+	for _, l := range literals {
+		p := newParser([]byte(l.src))
+		item, err := p.objectItem()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		list, ok := item.Val.(*ast.ListType)
+		if !ok {
+			t.Fatalf("node should be of type ListType, got: %T", item.Val)
+		}
+
+		if len(list.List) != len(l.comment) {
+			t.Fatalf("bad: %d", len(list.List))
+		}
+
+		for i, li := range list.List {
+			lt := li.(*ast.LiteralType)
+			comment := l.comment[i]
+
+			if (lt.LineComment == nil) != (comment == "") {
+				t.Fatalf("bad: %s", lt)
+			}
+
+			if comment == "" {
+				continue
+			}
+
+			actual := lt.LineComment.List[0].Text
+			if actual != comment {
+				t.Fatalf("bad: %q %q", actual, comment)
+			}
+		}
+	}
+}
+
+func TestObjectType(t *testing.T) {
+	var literals = []struct {
+		src      string
+		nodeType []ast.Node
+		itemLen  int
+	}{
+		{
+			`foo = {}`,
+			nil,
+			0,
+		},
+		{
+			`foo = {
+				bar = "fatih"
+			 }`,
+			[]ast.Node{&ast.LiteralType{}},
+			1,
+		},
+		{
+			`foo = {
+				bar = "fatih"
+				baz = ["arslan"]
+			 }`,
+			[]ast.Node{
+				&ast.LiteralType{},
+				&ast.ListType{},
+			},
+			2,
+		},
+		{
+			`foo = {
+				bar {}
+			 }`,
+			[]ast.Node{
+				&ast.ObjectType{},
+			},
+			1,
+		},
+		{
+			`foo {
+				bar {}
+				foo = true
+			 }`,
+			[]ast.Node{
+				&ast.ObjectType{},
+				&ast.LiteralType{},
+			},
+			2,
+		},
+	}
+
+	for _, l := range literals {
+		t.Logf("Source: %s", l.src)
+
+		p := newParser([]byte(l.src))
+		// p.enableTrace = true
+		item, err := p.objectItem()
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		// we know that the ObjectKey name is foo for all cases, what matters
+		// is the object
+		obj, ok := item.Val.(*ast.ObjectType)
+		if !ok {
+			t.Errorf("node should be of type ObjectType, got: %T", item.Val)
+			continue
+		}
+
+		// check if the total length of items are correct
+		equals(t, l.itemLen, len(obj.List.Items))
+
+		// check if the types are correct
+		for i, item := range obj.List.Items {
+			equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
+		}
+	}
+}
+
+func TestObjectKey(t *testing.T) {
+	keys := []struct {
+		exp []token.Type
+		src string
+	}{
+		{[]token.Type{token.IDENT}, `foo {}`},
+		{[]token.Type{token.IDENT}, `foo = {}`},
+		{[]token.Type{token.IDENT}, `foo = bar`},
+		{[]token.Type{token.IDENT}, `foo = 123`},
+		{[]token.Type{token.IDENT}, `foo = "${var.bar}`},
+		{[]token.Type{token.STRING}, `"foo" {}`},
+		{[]token.Type{token.STRING}, `"foo" = {}`},
+		{[]token.Type{token.STRING}, `"foo" = "${var.bar}`},
+		{[]token.Type{token.IDENT, token.IDENT}, `foo bar {}`},
+		{[]token.Type{token.IDENT, token.STRING}, `foo "bar" {}`},
+		{[]token.Type{token.STRING, token.IDENT}, `"foo" bar {}`},
+		{[]token.Type{token.IDENT, token.IDENT, token.IDENT}, `foo bar baz {}`},
+	}
+
+	for _, k := range keys {
+		p := newParser([]byte(k.src))
+		keys, err := p.objectKey()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		tokens := []token.Type{}
+		for _, o := range keys {
+			tokens = append(tokens, o.Token.Type)
+		}
+
+		equals(t, k.exp, tokens)
+	}
+
+	errKeys := []struct {
+		src string
+	}{
+		{`foo 12 {}`},
+		{`foo bar = {}`},
+		{`foo []`},
+		{`12 {}`},
+	}
+
+	for _, k := range errKeys {
+		p := newParser([]byte(k.src))
+		_, err := p.objectKey()
+		if err == nil {
+			t.Errorf("case '%s' should give an error", k.src)
+		}
+	}
+}
+
+func TestCommentGroup(t *testing.T) {
+	var cases = []struct {
+		src    string
+		groups int
+	}{
+		{"# Hello\n# World", 1},
+		{"# Hello\r\n# Windows", 1},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.src, func(t *testing.T) {
+			p := newParser([]byte(tc.src))
+			file, err := p.Parse()
+			if err != nil {
+				t.Fatalf("parse error: %s", err)
+			}
+
+			if len(file.Comments) != tc.groups {
+				t.Fatalf("bad: %#v", file.Comments)
+			}
+		})
+	}
+}
+
+// Official HCL tests
+func TestParse(t *testing.T) {
+	cases := []struct {
+		Name string
+		Err  bool
+	}{
+		{
+			"assign_colon.hcl",
+			true,
+		},
+		{
+			"comment.hcl",
+			false,
+		},
+		{
+			"comment_crlf.hcl",
+			false,
+		},
+		{
+			"comment_lastline.hcl",
+			false,
+		},
+		{
+			"comment_single.hcl",
+			false,
+		},
+		{
+			"empty.hcl",
+			false,
+		},
+		{
+			"list_comma.hcl",
+			false,
+		},
+		{
+			"multiple.hcl",
+			false,
+		},
+		{
+			"object_list_comma.hcl",
+			false,
+		},
+		{
+			"structure.hcl",
+			false,
+		},
+		{
+			"structure_basic.hcl",
+			false,
+		},
+		{
+			"structure_empty.hcl",
+			false,
+		},
+		{
+			"complex.hcl",
+			false,
+		},
+		{
+			"complex_crlf.hcl",
+			false,
+		},
+		{
+			"types.hcl",
+			false,
+		},
+		{
+			"array_comment.hcl",
+			false,
+		},
+		{
+			"array_comment_2.hcl",
+			true,
+		},
+		{
+			"missing_braces.hcl",
+			true,
+		},
+		{
+			"unterminated_object.hcl",
+			true,
+		},
+		{
+			"unterminated_object_2.hcl",
+			true,
+		},
+		{
+			"key_without_value.hcl",
+			true,
+		},
+		{
+			"object_key_without_value.hcl",
+			true,
+		},
+		{
+			"object_key_assign_without_value.hcl",
+			true,
+		},
+		{
+			"object_key_assign_without_value2.hcl",
+			true,
+		},
+		{
+			"object_key_assign_without_value3.hcl",
+			true,
+		},
+		{
+			"git_crypt.hcl",
+			true,
+		},
+	}
+
+	const fixtureDir = "./test-fixtures"
+
+	for _, tc := range cases {
+		t.Run(tc.Name, func(t *testing.T) {
+			d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
+			if err != nil {
+				t.Fatalf("err: %s", err)
+			}
+
+			v, err := Parse(d)
+			if (err != nil) != tc.Err {
+				t.Fatalf("Input: %s\n\nError: %s\n\nAST: %#v", tc.Name, err, v)
+			}
+		})
+	}
+}
+
+func TestParse_inline(t *testing.T) {
+	cases := []struct {
+		Value string
+		Err   bool
+	}{
+		{"t t e{{}}", true},
+		{"o{{}}", true},
+		{"t t e d N{{}}", true},
+		{"t t e d{{}}", true},
+		{"N{}N{{}}", true},
+		{"v\nN{{}}", true},
+		{"v=/\n[,", true},
+		{"v=10kb", true},
+		{"v=/foo", true},
+	}
+
+	for _, tc := range cases {
+		t.Logf("Testing: %q", tc.Value)
+		ast, err := Parse([]byte(tc.Value))
+		if (err != nil) != tc.Err {
+			t.Fatalf("Input: %q\n\nError: %s\n\nAST: %#v", tc.Value, err, ast)
+		}
+	}
+}
+
+// equals fails the test if exp is not equal to act.
+func equals(tb testing.TB, exp, act interface{}) {
+	if !reflect.DeepEqual(exp, act) {
+		_, file, line, _ := runtime.Caller(1)
+		fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
+		tb.FailNow()
+	}
+}
diff --git a/hcl/parser/test-fixtures/array_comment.hcl b/hcl/parser/test-fixtures/array_comment.hcl
new file mode 100644
index 0000000..78c2675
--- /dev/null
+++ b/hcl/parser/test-fixtures/array_comment.hcl
@@ -0,0 +1,4 @@
+foo = [
+    "1",
+    "2", # comment
+]
diff --git a/hcl/parser/test-fixtures/array_comment_2.hcl b/hcl/parser/test-fixtures/array_comment_2.hcl
new file mode 100644
index 0000000..f916677
--- /dev/null
+++ b/hcl/parser/test-fixtures/array_comment_2.hcl
@@ -0,0 +1,6 @@
+provisioner "remote-exec" {
+  scripts = [
+    "${path.module}/scripts/install-consul.sh" // missing comma
+    "${path.module}/scripts/install-haproxy.sh"
+  ] 
+}
diff --git a/hcl/parser/test-fixtures/assign_colon.hcl b/hcl/parser/test-fixtures/assign_colon.hcl
new file mode 100644
index 0000000..eb5a99a
--- /dev/null
+++ b/hcl/parser/test-fixtures/assign_colon.hcl
@@ -0,0 +1,6 @@
+resource = [{
+	"foo": {
+		"bar": {},
+		"baz": [1, 2, "foo"],
+	}
+}]
diff --git a/hcl/parser/test-fixtures/assign_deep.hcl b/hcl/parser/test-fixtures/assign_deep.hcl
new file mode 100644
index 0000000..dd3151c
--- /dev/null
+++ b/hcl/parser/test-fixtures/assign_deep.hcl
@@ -0,0 +1,5 @@
+resource = [{
+	foo = [{
+		bar = {}
+	}]
+}]
diff --git a/hcl/parser/test-fixtures/comment.hcl b/hcl/parser/test-fixtures/comment.hcl
new file mode 100644
index 0000000..e32be87
--- /dev/null
+++ b/hcl/parser/test-fixtures/comment.hcl
@@ -0,0 +1,15 @@
+// Foo
+
+/* Bar */
+
+/*
+/*
+Baz
+*/
+
+# Another
+
+# Multiple
+# Lines
+
+foo = "bar"
diff --git a/hcl/parser/test-fixtures/comment_crlf.hcl b/hcl/parser/test-fixtures/comment_crlf.hcl
new file mode 100644
index 0000000..1ff7f29
--- /dev/null
+++ b/hcl/parser/test-fixtures/comment_crlf.hcl
@@ -0,0 +1,15 @@
+// Foo
+
+/* Bar */
+
+/*
+/*
+Baz
+*/
+
+# Another
+
+# Multiple
+# Lines
+
+foo = "bar"
diff --git a/hcl/parser/test-fixtures/comment_lastline.hcl b/hcl/parser/test-fixtures/comment_lastline.hcl
new file mode 100644
index 0000000..5529b9b
--- /dev/null
+++ b/hcl/parser/test-fixtures/comment_lastline.hcl
@@ -0,0 +1 @@
+#foo
\ No newline at end of file
diff --git a/hcl/parser/test-fixtures/comment_single.hcl b/hcl/parser/test-fixtures/comment_single.hcl
new file mode 100644
index 0000000..fec5601
--- /dev/null
+++ b/hcl/parser/test-fixtures/comment_single.hcl
@@ -0,0 +1 @@
+# Hello
diff --git a/hcl/parser/test-fixtures/complex.hcl b/hcl/parser/test-fixtures/complex.hcl
new file mode 100644
index 0000000..13b3c27
--- /dev/null
+++ b/hcl/parser/test-fixtures/complex.hcl
@@ -0,0 +1,42 @@
+variable "foo" {
+	default = "bar"
+	description = "bar"
+}
+
+variable "groups" { }
+
+provider "aws" {
+	access_key = "foo"
+	secret_key = "bar"
+}
+
+provider "do" {
+	api_key = "${var.foo}"
+}
+
+resource "aws_security_group" "firewall" {
+	count = 5
+}
+
+resource aws_instance "web" {
+	ami = "${var.foo}"
+	security_groups = [
+		"foo",
+		"${aws_security_group.firewall.foo}",
+		"${element(split(\",\", var.groups)}",
+	]
+	network_interface = {
+		device_index = 0
+		description = "Main network interface"
+	}
+}
+
+resource "aws_instance" "db" {
+	security_groups = "${aws_security_group.firewall.*.id}"
+	VPC = "foo"
+	depends_on = ["aws_instance.web"]
+}
+
+output "web_ip" {
+	value = "${aws_instance.web.private_ip}"
+}
diff --git a/hcl/parser/test-fixtures/complex_crlf.hcl b/hcl/parser/test-fixtures/complex_crlf.hcl
new file mode 100644
index 0000000..9b071d1
--- /dev/null
+++ b/hcl/parser/test-fixtures/complex_crlf.hcl
@@ -0,0 +1,42 @@
+variable "foo" {
+	default = "bar"
+	description = "bar"
+}
+
+variable "groups" { }
+
+provider "aws" {
+	access_key = "foo"
+	secret_key = "bar"
+}
+
+provider "do" {
+	api_key = "${var.foo}"
+}
+
+resource "aws_security_group" "firewall" {
+	count = 5
+}
+
+resource aws_instance "web" {
+	ami = "${var.foo}"
+	security_groups = [
+		"foo",
+		"${aws_security_group.firewall.foo}",
+		"${element(split(\",\", var.groups)}",
+	]
+	network_interface = {
+		device_index = 0
+		description = "Main network interface"
+	}
+}
+
+resource "aws_instance" "db" {
+	security_groups = "${aws_security_group.firewall.*.id}"
+	VPC = "foo"
+	depends_on = ["aws_instance.web"]
+}
+
+output "web_ip" {
+	value = "${aws_instance.web.private_ip}"
+}
diff --git a/hcl/parser/test-fixtures/complex_key.hcl b/hcl/parser/test-fixtures/complex_key.hcl
new file mode 100644
index 0000000..0007aaf
--- /dev/null
+++ b/hcl/parser/test-fixtures/complex_key.hcl
@@ -0,0 +1 @@
+foo.bar = "baz"
diff --git a/hcl/parser/test-fixtures/empty.hcl b/hcl/parser/test-fixtures/empty.hcl
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/hcl/parser/test-fixtures/empty.hcl
diff --git a/hcl/parser/test-fixtures/git_crypt.hcl b/hcl/parser/test-fixtures/git_crypt.hcl
new file mode 100644
index 0000000..f691948
--- /dev/null
+++ b/hcl/parser/test-fixtures/git_crypt.hcl
Binary files differ
diff --git a/hcl/parser/test-fixtures/key_without_value.hcl b/hcl/parser/test-fixtures/key_without_value.hcl
new file mode 100644
index 0000000..257cc56
--- /dev/null
+++ b/hcl/parser/test-fixtures/key_without_value.hcl
@@ -0,0 +1 @@
+foo
diff --git a/hcl/parser/test-fixtures/list.hcl b/hcl/parser/test-fixtures/list.hcl
new file mode 100644
index 0000000..059d4ce
--- /dev/null
+++ b/hcl/parser/test-fixtures/list.hcl
@@ -0,0 +1 @@
+foo = [1, 2, "foo"]
diff --git a/hcl/parser/test-fixtures/list_comma.hcl b/hcl/parser/test-fixtures/list_comma.hcl
new file mode 100644
index 0000000..50f4218
--- /dev/null
+++ b/hcl/parser/test-fixtures/list_comma.hcl
@@ -0,0 +1 @@
+foo = [1, 2, "foo",]
diff --git a/hcl/parser/test-fixtures/missing_braces.hcl b/hcl/parser/test-fixtures/missing_braces.hcl
new file mode 100644
index 0000000..68e7274
--- /dev/null
+++ b/hcl/parser/test-fixtures/missing_braces.hcl
@@ -0,0 +1,4 @@
+# should error, but not crash
+resource "template_file" "cloud_config" {
+  template = "$file("${path.module}/some/path")"
+}
diff --git a/hcl/parser/test-fixtures/multiple.hcl b/hcl/parser/test-fixtures/multiple.hcl
new file mode 100644
index 0000000..029c54b
--- /dev/null
+++ b/hcl/parser/test-fixtures/multiple.hcl
@@ -0,0 +1,2 @@
+foo = "bar"
+key = 7
diff --git a/hcl/parser/test-fixtures/object_key_assign_without_value.hcl b/hcl/parser/test-fixtures/object_key_assign_without_value.hcl
new file mode 100644
index 0000000..37a2c7a
--- /dev/null
+++ b/hcl/parser/test-fixtures/object_key_assign_without_value.hcl
@@ -0,0 +1,3 @@
+foo {
+  bar =
+}
diff --git a/hcl/parser/test-fixtures/object_key_assign_without_value2.hcl b/hcl/parser/test-fixtures/object_key_assign_without_value2.hcl
new file mode 100644
index 0000000..83ec5e6
--- /dev/null
+++ b/hcl/parser/test-fixtures/object_key_assign_without_value2.hcl
@@ -0,0 +1,4 @@
+foo {
+  baz = 7
+  bar =
+}
diff --git a/hcl/parser/test-fixtures/object_key_assign_without_value3.hcl b/hcl/parser/test-fixtures/object_key_assign_without_value3.hcl
new file mode 100644
index 0000000..21136d1
--- /dev/null
+++ b/hcl/parser/test-fixtures/object_key_assign_without_value3.hcl
@@ -0,0 +1,4 @@
+foo {
+  bar =
+  baz = 7
+}
diff --git a/hcl/parser/test-fixtures/object_key_without_value.hcl b/hcl/parser/test-fixtures/object_key_without_value.hcl
new file mode 100644
index 0000000..a998731
--- /dev/null
+++ b/hcl/parser/test-fixtures/object_key_without_value.hcl
@@ -0,0 +1,3 @@
+foo {
+  bar
+}
diff --git a/hcl/parser/test-fixtures/object_list_comma.hcl b/hcl/parser/test-fixtures/object_list_comma.hcl
new file mode 100644
index 0000000..1921ec8
--- /dev/null
+++ b/hcl/parser/test-fixtures/object_list_comma.hcl
@@ -0,0 +1 @@
+foo = {one = 1, two = 2}
diff --git a/hcl/parser/test-fixtures/old.hcl b/hcl/parser/test-fixtures/old.hcl
new file mode 100644
index 0000000..e9f77ca
--- /dev/null
+++ b/hcl/parser/test-fixtures/old.hcl
@@ -0,0 +1,3 @@
+default = {
+    "eu-west-1": "ami-b1cf19c6",
+}
diff --git a/hcl/parser/test-fixtures/structure.hcl b/hcl/parser/test-fixtures/structure.hcl
new file mode 100644
index 0000000..92592fb
--- /dev/null
+++ b/hcl/parser/test-fixtures/structure.hcl
@@ -0,0 +1,5 @@
+// This is a test structure for the lexer
+foo bar "baz" {
+	key = 7
+	foo = "bar"
+}
diff --git a/hcl/parser/test-fixtures/structure_basic.hcl b/hcl/parser/test-fixtures/structure_basic.hcl
new file mode 100644
index 0000000..7229a1f
--- /dev/null
+++ b/hcl/parser/test-fixtures/structure_basic.hcl
@@ -0,0 +1,5 @@
+foo {
+	value = 7
+	"value" = 8
+	"complex::value" = 9
+}
diff --git a/hcl/parser/test-fixtures/structure_empty.hcl b/hcl/parser/test-fixtures/structure_empty.hcl
new file mode 100644
index 0000000..4d156dd
--- /dev/null
+++ b/hcl/parser/test-fixtures/structure_empty.hcl
@@ -0,0 +1 @@
+resource "foo" "bar" {}
diff --git a/hcl/parser/test-fixtures/types.hcl b/hcl/parser/test-fixtures/types.hcl
new file mode 100644
index 0000000..cf2747e
--- /dev/null
+++ b/hcl/parser/test-fixtures/types.hcl
@@ -0,0 +1,7 @@
+foo = "bar"
+bar = 7
+baz = [1,2,3]
+foo = -12
+bar = 3.14159
+foo = true
+bar = false
diff --git a/hcl/parser/test-fixtures/unterminated_object.hcl b/hcl/parser/test-fixtures/unterminated_object.hcl
new file mode 100644
index 0000000..31b37c4
--- /dev/null
+++ b/hcl/parser/test-fixtures/unterminated_object.hcl
@@ -0,0 +1,2 @@
+foo "baz" {
+    bar = "baz"
diff --git a/hcl/parser/test-fixtures/unterminated_object_2.hcl b/hcl/parser/test-fixtures/unterminated_object_2.hcl
new file mode 100644
index 0000000..294e36d
--- /dev/null
+++ b/hcl/parser/test-fixtures/unterminated_object_2.hcl
@@ -0,0 +1,6 @@
+resource "aws_eip" "EIP1" { a { a { a { a { a {
+            count = "1"
+
+resource "aws_eip" "EIP2" {
+      count = "1"
+}
diff --git a/hcl/printer/google_init_test.go b/hcl/printer/google_init_test.go
new file mode 100644
index 0000000..0b7faa9
--- /dev/null
+++ b/hcl/printer/google_init_test.go
@@ -0,0 +1,16 @@
+// This file contains google3 specific code to make tests work with blaze.
+
+package printer
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+func init() {
+	dir := filepath.Join(os.Getenv("TEST_SRCDIR"), "google3/third_party/golang/hashicorp/hcl/hcl/printer")
+	if err := os.Chdir(dir); err != nil {
+		panic(fmt.Sprintf("os.Chdir(%q): %v", dir, err))
+	}
+}
diff --git a/hcl/printer/nodes.go b/hcl/printer/nodes.go
new file mode 100644
index 0000000..677058e
--- /dev/null
+++ b/hcl/printer/nodes.go
@@ -0,0 +1,789 @@
+package printer
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/ast/ast"
+	"google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+)
+
+const (
+	blank    = byte(' ')
+	newline  = byte('\n')
+	tab      = byte('\t')
+	infinity = 1 << 30 // offset or line
+)
+
+var (
+	unindent = []byte("\uE123") // in the private use space
+)
+
+type printer struct {
+	cfg  Config
+	prev token.Pos
+
+	comments           []*ast.CommentGroup // may be nil, contains all comments
+	standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
+
+	enableTrace bool
+	indentTrace int
+}
+
+type ByPosition []*ast.CommentGroup
+
+func (b ByPosition) Len() int           { return len(b) }
+func (b ByPosition) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
+
+// collectComments collects all standalone comments, i.e. comments which are
+// not lead or line comments
+func (p *printer) collectComments(node ast.Node) {
+	// first collect all comments. These are already stored in
+	// ast.File.Comments.
+	ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+		switch t := nn.(type) {
+		case *ast.File:
+			p.comments = t.Comments
+			return nn, false
+		}
+		return nn, true
+	})
+
+	standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
+	for _, c := range p.comments {
+		standaloneComments[c.Pos()] = c
+	}
+
+	// next remove all lead and line comments from the overall comment map.
+	// This will give us comments which are standalone, comments which are not
+	// assigned to any kind of node.
+	ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+		switch t := nn.(type) {
+		case *ast.LiteralType:
+			if t.LeadComment != nil {
+				for _, comment := range t.LeadComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+
+			if t.LineComment != nil {
+				for _, comment := range t.LineComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+		case *ast.ObjectItem:
+			if t.LeadComment != nil {
+				for _, comment := range t.LeadComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+
+			if t.LineComment != nil {
+				for _, comment := range t.LineComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+		}
+
+		return nn, true
+	})
+
+	for _, c := range standaloneComments {
+		p.standaloneComments = append(p.standaloneComments, c)
+	}
+
+	sort.Sort(ByPosition(p.standaloneComments))
+}
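+// Editor's sketch: in the input below only "# standalone" survives into
+// p.standaloneComments; the other two are removed because they are attached
+// to the object item as its lead and line comments:
+//
+//	# standalone
+//
+//	# lead comment
+//	foo = 1 # line comment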
+
+// output creates printable HCL output for the given node and returns it.
+func (p *printer) output(n interface{}) []byte {
+	var buf bytes.Buffer
+
+	switch t := n.(type) {
+	case *ast.File:
+		// File doesn't trace so we add the tracing here
+		defer un(trace(p, "File"))
+		return p.output(t.Node)
+	case *ast.ObjectList:
+		defer un(trace(p, "ObjectList"))
+
+		var index int
+		for {
+			// Determine the location of the next actual non-comment
+			// item. If we're at the end, the next item is at "infinity"
+			var nextItem token.Pos
+			if index != len(t.Items) {
+				nextItem = t.Items[index].Pos()
+			} else {
+				nextItem = token.Pos{Offset: infinity, Line: infinity}
+			}
+
+			// Go through the standalone comments in the file and print out
+			// the comments that belong before this object item.
+			for _, c := range p.standaloneComments {
+				// Go through all the comments in the group. The group
+				// should be printed together, not separated by double newlines.
+				printed := false
+				newlinePrinted := false
+				for _, comment := range c.List {
+					// We only care about comments after the previous item
+					// we've printed so that comments are printed in the
+					// correct locations (between two objects for example).
+					// And before the next item.
+					if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+						// if we hit the end, add newlines so we can print the
+						// comment. We don't do this if prev is invalid, which
+						// means the beginning of the file, since the first
+						// comment should be at the first line.
+						if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
+							buf.Write([]byte{newline, newline})
+							newlinePrinted = true
+						}
+
+						// Write the actual comment.
+						buf.WriteString(comment.Text)
+						buf.WriteByte(newline)
+
+						// Set printed to true to note that we printed something
+						printed = true
+					}
+				}
+
+				// If we're not at the last item, write a new line so
+				// that there is a newline separating this comment from
+				// the next object.
+				if printed && index != len(t.Items) {
+					buf.WriteByte(newline)
+				}
+			}
+
+			if index == len(t.Items) {
+				break
+			}
+
+			buf.Write(p.output(t.Items[index]))
+			if index != len(t.Items)-1 {
+				// Always write a newline to separate us from the next item
+				buf.WriteByte(newline)
+
+				// Need to determine if we're going to separate the next item
+				// with a blank line. The logic here is simple, though there
+				// are a few conditions:
+				//
+				//   1. The next object is more than one line away anyways,
+				//      so we need an empty line.
+				//
+				//   2. The next object is not a "single line" object, so
+				//      we need an empty line.
+				//
+				//   3. This current object is not a single line object,
+				//      so we need an empty line.
+				current := t.Items[index]
+				next := t.Items[index+1]
+				if next.Pos().Line != t.Items[index].Pos().Line+1 ||
+					!p.isSingleLineObject(next) ||
+					!p.isSingleLineObject(current) {
+					buf.WriteByte(newline)
+				}
+			}
+			index++
+		}
+	case *ast.ObjectKey:
+		buf.WriteString(t.Token.Text)
+	case *ast.ObjectItem:
+		p.prev = t.Pos()
+		buf.Write(p.objectItem(t))
+	case *ast.LiteralType:
+		buf.Write(p.literalType(t))
+	case *ast.ListType:
+		buf.Write(p.list(t))
+	case *ast.ObjectType:
+		buf.Write(p.objectType(t))
+	default:
+		fmt.Printf(" unknown type: %T\n", n)
+	}
+
+	return buf.Bytes()
+}
+
+func (p *printer) literalType(lit *ast.LiteralType) []byte {
+	result := []byte(lit.Token.Text)
+	switch lit.Token.Type {
+	case token.HEREDOC:
+		// Clear the trailing newline from heredocs
+		if result[len(result)-1] == '\n' {
+			result = result[:len(result)-1]
+		}
+
+		// Poison lines 2+ so that we don't indent them
+		result = p.heredocIndent(result)
+	case token.STRING:
+		// If this is a multiline string, poison lines 2+ so we don't
+		// indent them.
+		if bytes.IndexRune(result, '\n') >= 0 {
+			result = p.heredocIndent(result)
+		}
+	}
+
+	return result
+}
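+// Editor's note: "poisoning" prefixes lines 2+ with the private-use marker
+// from heredocIndent, so the indentation applied later can be reversed by
+// unindent, keeping heredoc bodies flush-left in the final output.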
+
+// objectItem returns the printable HCL form of an object item. An object
+// item starts with one or multiple keys and has a value. The value might be
+// of any type.
+func (p *printer) objectItem(o *ast.ObjectItem) []byte {
+	defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
+	var buf bytes.Buffer
+
+	if o.LeadComment != nil {
+		for _, comment := range o.LeadComment.List {
+			buf.WriteString(comment.Text)
+			buf.WriteByte(newline)
+		}
+	}
+
+	// If key and val are on different lines, treat line comments like lead comments.
+	if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
+		for _, comment := range o.LineComment.List {
+			buf.WriteString(comment.Text)
+			buf.WriteByte(newline)
+		}
+	}
+
+	for i, k := range o.Keys {
+		buf.WriteString(k.Token.Text)
+		buf.WriteByte(blank)
+
+		// reach end of key
+		if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
+			buf.WriteString("=")
+			buf.WriteByte(blank)
+		}
+	}
+
+	buf.Write(p.output(o.Val))
+
+	if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
+		buf.WriteByte(blank)
+		for _, comment := range o.LineComment.List {
+			buf.WriteString(comment.Text)
+		}
+	}
+
+	return buf.Bytes()
+}
+
+// objectType returns the printable HCL form of an object type. An object type
+// begins with a brace and ends with a brace.
+func (p *printer) objectType(o *ast.ObjectType) []byte {
+	defer un(trace(p, "ObjectType"))
+	var buf bytes.Buffer
+	buf.WriteString("{")
+
+	var index int
+	var nextItem token.Pos
+	var commented, newlinePrinted bool
+	for {
+		// Determine the location of the next actual non-comment
+		// item. If we're at the end, the next item is the closing brace
+		if index != len(o.List.Items) {
+			nextItem = o.List.Items[index].Pos()
+		} else {
+			nextItem = o.Rbrace
+		}
+
+		// Go through the standalone comments in the file and print out
+		// the comments that belong before this object item.
+		for _, c := range p.standaloneComments {
+			printed := false
+			var lastCommentPos token.Pos
+			for _, comment := range c.List {
+				// We only care about comments after the previous item
+				// we've printed so that comments are printed in the
+				// correct locations (between two objects for example).
+				// And before the next item.
+				if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+					// If there are standalone comments and the initial newline has not
+					// been printed yet, do it now.
+					if !newlinePrinted {
+						newlinePrinted = true
+						buf.WriteByte(newline)
+					}
+
+					// add newline if it's between other printed nodes
+					if index > 0 {
+						commented = true
+						buf.WriteByte(newline)
+					}
+
+					// Store this position
+					lastCommentPos = comment.Pos()
+
+					// output the comment itself
+					buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
+
+					// Set printed to true to note that we printed something
+					printed = true
+
+					/*
+						if index != len(o.List.Items) {
+							buf.WriteByte(newline) // do not print on the end
+						}
+					*/
+				}
+			}
+
+			// Stuff to do if we had comments
+			if printed {
+				// Always write a newline
+				buf.WriteByte(newline)
+
+				// If there is another item in the object and our comment
+				// didn't hug it directly, then make sure there is a blank
+				// line separating them.
+				if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
+					buf.WriteByte(newline)
+				}
+			}
+		}
+
+		if index == len(o.List.Items) {
+			p.prev = o.Rbrace
+			break
+		}
+
+		// At this point we are sure that it's not a totally empty block: print
+		// the initial newline if it hasn't been printed yet by the previous
+		// block about standalone comments.
+		if !newlinePrinted {
+			buf.WriteByte(newline)
+			newlinePrinted = true
+		}
+
+		// check if we have adjacent one-liner items. If so, we're going to
+		// align their comments.
+		var aligned []*ast.ObjectItem
+		for _, item := range o.List.Items[index:] {
+			// we don't group one line lists
+			if len(o.List.Items) == 1 {
+				break
+			}
+
+			// one means a one-liner without any lead comment
+			// two means a one-liner with a lead comment
+			// anything else might be something else
+			cur := lines(string(p.objectItem(item)))
+			if cur > 2 {
+				break
+			}
+
+			curPos := item.Pos()
+
+			nextPos := token.Pos{}
+			if index != len(o.List.Items)-1 {
+				nextPos = o.List.Items[index+1].Pos()
+			}
+
+			prevPos := token.Pos{}
+			if index != 0 {
+				prevPos = o.List.Items[index-1].Pos()
+			}
+
+			// fmt.Println("DEBUG ----------------")
+			// fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
+			// fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
+			// fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
+
+			if curPos.Line+1 == nextPos.Line {
+				aligned = append(aligned, item)
+				index++
+				continue
+			}
+
+			if curPos.Line-1 == prevPos.Line {
+				aligned = append(aligned, item)
+				index++
+
+				// finish if we have a new line or comment next. This happens
+				// if the next item is not adjacent
+				if curPos.Line+1 != nextPos.Line {
+					break
+				}
+				continue
+			}
+
+			break
+		}
+
+		// put newlines if the items are between other non-aligned items.
+		// newlines are also added if there is a standalone comment already,
+		// so check for that too
+		if !commented && index != len(aligned) {
+			buf.WriteByte(newline)
+		}
+
+		if len(aligned) >= 1 {
+			p.prev = aligned[len(aligned)-1].Pos()
+
+			items := p.alignedItems(aligned)
+			buf.Write(p.indent(items))
+		} else {
+			p.prev = o.List.Items[index].Pos()
+
+			buf.Write(p.indent(p.objectItem(o.List.Items[index])))
+			index++
+		}
+
+		buf.WriteByte(newline)
+	}
+
+	buf.WriteString("}")
+	return buf.Bytes()
+}
+
+func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
+	var buf bytes.Buffer
+
+	// find the longest key and value length, needed for alignment
+	var longestKeyLen int // longest key length
+	var longestValLen int // longest value length
+	for _, item := range items {
+		key := len(item.Keys[0].Token.Text)
+		val := len(p.output(item.Val))
+
+		if key > longestKeyLen {
+			longestKeyLen = key
+		}
+
+		if val > longestValLen {
+			longestValLen = val
+		}
+	}
+
+	for i, item := range items {
+		if item.LeadComment != nil {
+			for _, comment := range item.LeadComment.List {
+				buf.WriteString(comment.Text)
+				buf.WriteByte(newline)
+			}
+		}
+
+		for i, k := range item.Keys {
+			keyLen := len(k.Token.Text)
+			buf.WriteString(k.Token.Text)
+			for i := 0; i < longestKeyLen-keyLen+1; i++ {
+				buf.WriteByte(blank)
+			}
+
+			// reach end of key
+			if i == len(item.Keys)-1 && len(item.Keys) == 1 {
+				buf.WriteString("=")
+				buf.WriteByte(blank)
+			}
+		}
+
+		val := p.output(item.Val)
+		valLen := len(val)
+		buf.Write(val)
+
+		if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
+			for i := 0; i < longestValLen-valLen+1; i++ {
+				buf.WriteByte(blank)
+			}
+
+			for _, comment := range item.LineComment.List {
+				buf.WriteString(comment.Text)
+			}
+		}
+
+		// do not print for the last item
+		if i != len(items)-1 {
+			buf.WriteByte(newline)
+		}
+	}
+
+	return buf.Bytes()
+}
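+// Editor's sketch of the alignment this produces for adjacent one-liners
+// (the output below appears in the testdata/comment.golden fixture):
+//
+//	default     = "bar"
+//	description = "bar" # yooo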
+
+// list returns the printable HCL form of a list type.
+func (p *printer) list(l *ast.ListType) []byte {
+	if p.isSingleLineList(l) {
+		return p.singleLineList(l)
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString("[")
+	buf.WriteByte(newline)
+
+	var longestLine int
+	for _, item := range l.List {
+		// for now we assume that the list only contains literal types
+		if lit, ok := item.(*ast.LiteralType); ok {
+			lineLen := len(lit.Token.Text)
+			if lineLen > longestLine {
+				longestLine = lineLen
+			}
+		}
+	}
+
+	haveEmptyLine := false
+	for i, item := range l.List {
+		// If we have a lead comment, then we want to write that first
+		leadComment := false
+		if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
+			leadComment = true
+
+			// Ensure an empty line before every element with a
+			// lead comment (except the first item in a list).
+			if !haveEmptyLine && i != 0 {
+				buf.WriteByte(newline)
+			}
+
+			for _, comment := range lit.LeadComment.List {
+				buf.Write(p.indent([]byte(comment.Text)))
+				buf.WriteByte(newline)
+			}
+		}
+
+		// also indent each line
+		val := p.output(item)
+		curLen := len(val)
+		buf.Write(p.indent(val))
+
+		// if this item is a heredoc, then we output the comma on
+		// the next line. This is the only case this happens.
+		comma := []byte{','}
+		if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+			buf.WriteByte(newline)
+			comma = p.indent(comma)
+		}
+
+		buf.Write(comma)
+
+		if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+			// if the next item doesn't have any comments, do not align
+			buf.WriteByte(blank) // align one space
+			for i := 0; i < longestLine-curLen; i++ {
+				buf.WriteByte(blank)
+			}
+
+			for _, comment := range lit.LineComment.List {
+				buf.WriteString(comment.Text)
+			}
+		}
+
+		buf.WriteByte(newline)
+
+		// Ensure an empty line after every element with a
+		// lead comment (except the first item in a list).
+		haveEmptyLine = leadComment && i != len(l.List)-1
+		if haveEmptyLine {
+			buf.WriteByte(newline)
+		}
+	}
+
+	buf.WriteString("]")
+	return buf.Bytes()
+}
+
+// isSingleLineList returns true if:
+// * the list was previously formatted entirely on one line
+// * it consists entirely of literals
+// * there are either no heredoc strings or the list has exactly one element
+// * there are no line comments
+func (printer) isSingleLineList(l *ast.ListType) bool {
+	for _, item := range l.List {
+		if item.Pos().Line != l.Lbrack.Line {
+			return false
+		}
+
+		lit, ok := item.(*ast.LiteralType)
+		if !ok {
+			return false
+		}
+
+		if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
+			return false
+		}
+
+		if lit.LineComment != nil {
+			return false
+		}
+	}
+
+	return true
+}
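+// Editor's note, applying the rules above:
+//
+//	foo = [1, 2, "foo"] // stays on a single line
+//	foo = [{a = 1}]     // element is not a literal: multi-line form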
+
+// singleLineList prints a simple single line list.
+// For a definition of "simple", see isSingleLineList above.
+func (p *printer) singleLineList(l *ast.ListType) []byte {
+	buf := &bytes.Buffer{}
+
+	buf.WriteString("[")
+	for i, item := range l.List {
+		if i != 0 {
+			buf.WriteString(", ")
+		}
+
+		// Output the item itself
+		buf.Write(p.output(item))
+
+		// The heredoc marker needs to be at the end of line.
+		if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+			buf.WriteByte(newline)
+		}
+	}
+
+	buf.WriteString("]")
+	return buf.Bytes()
+}
+
+// indent indents each non-empty line of the given buffer
+func (p *printer) indent(buf []byte) []byte {
+	var prefix []byte
+	if p.cfg.SpacesWidth != 0 {
+		for i := 0; i < p.cfg.SpacesWidth; i++ {
+			prefix = append(prefix, blank)
+		}
+	} else {
+		prefix = []byte{tab}
+	}
+
+	var res []byte
+	bol := true
+	for _, c := range buf {
+		if bol && c != '\n' {
+			res = append(res, prefix...)
+		}
+
+		res = append(res, c)
+		bol = c == '\n'
+	}
+	return res
+}
+
+// unindent removes all the indentation from the tombstoned lines
+func (p *printer) unindent(buf []byte) []byte {
+	var res []byte
+	for i := 0; i < len(buf); i++ {
+		skip := len(buf)-i <= len(unindent)
+		if !skip {
+			skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
+		}
+		if skip {
+			res = append(res, buf[i])
+			continue
+		}
+
+		// We have a marker, so we have to backtrack here and clean out
+		// any whitespace ahead of our tombstone up to a \n
+		for j := len(res) - 1; j >= 0; j-- {
+			if res[j] == '\n' {
+				break
+			}
+
+			res = res[:j]
+		}
+
+		// Skip the entire unindent marker
+		i += len(unindent) - 1
+	}
+
+	return res
+}
+
+// heredocIndent marks the 2nd and all further lines as unindentable
+func (p *printer) heredocIndent(buf []byte) []byte {
+	var res []byte
+	bol := false
+	for _, c := range buf {
+		if bol && c != '\n' {
+			res = append(res, unindent...)
+		}
+		res = append(res, c)
+		bol = c == '\n'
+	}
+	return res
+}
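+// Editor's sketch of the round trip for a heredoc value (the \uE123 marker
+// is internal and never reaches the final output):
+//
+//	heredocIndent: "l1\nl2"  -> "l1\n\uE123l2"
+//	indent:        prefixes every line, including the marked one
+//	unindent:      drops the marker plus the indentation just added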
+
+// isSingleLineObject tells whether the given object item is a single
+// line object such as "obj {}".
+//
+// A single line object:
+//
+//   * has no lead comments (these would force multiple lines)
+//   * has no assignment
+//   * has no values in the stanza (within {})
+//
+func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
+	// If there is a lead comment, can't be one line
+	if val.LeadComment != nil {
+		return false
+	}
+
+	// If there is assignment, we always break by line
+	if val.Assign.IsValid() {
+		return false
+	}
+
+	// If it isn't an object type, then it's not a single line object
+	ot, ok := val.Val.(*ast.ObjectType)
+	if !ok {
+		return false
+	}
+
+	// If the object has no items, it is single line!
+	return len(ot.List.Items) == 0
+}
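+// Editor's note, per the three rules above:
+//
+//	obj {}          -> true
+//	obj { a = 1 }   -> false (the stanza has items)
+//	obj = {}        -> false (assignment is present)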
+
+func lines(txt string) int {
+	endline := 1
+	for i := 0; i < len(txt); i++ {
+		if txt[i] == '\n' {
+			endline++
+		}
+	}
+	return endline
+}
+
+// ----------------------------------------------------------------------------
+// Tracing support
+
+func (p *printer) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	i := 2 * p.indentTrace
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *printer, msg string) *printer {
+	p.printTrace(msg, "(")
+	p.indentTrace++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *printer) {
+	p.indentTrace--
+	p.printTrace(")")
+}
diff --git a/hcl/printer/printer.go b/hcl/printer/printer.go
new file mode 100644
index 0000000..110d7f7
--- /dev/null
+++ b/hcl/printer/printer.go
@@ -0,0 +1,66 @@
+// Package printer implements printing of AST nodes to HCL format.
+package printer
+
+import (
+	"bytes"
+	"io"
+	"text/tabwriter"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/ast/ast"
+	"google3/third_party/golang/hashicorp/hcl/hcl/parser/parser"
+)
+
+var DefaultConfig = Config{
+	SpacesWidth: 2,
+}
+
+// A Config node controls the output of Fprint.
+type Config struct {
+	SpacesWidth int // if set, it will use spaces instead of tabs for alignment
+}
+
+func (c *Config) Fprint(output io.Writer, node ast.Node) error {
+	p := &printer{
+		cfg:                *c,
+		comments:           make([]*ast.CommentGroup, 0),
+		standaloneComments: make([]*ast.CommentGroup, 0),
+		// enableTrace:        true,
+	}
+
+	p.collectComments(node)
+
+	if _, err := output.Write(p.unindent(p.output(node))); err != nil {
+		return err
+	}
+
+	// flush tabwriter, if any
+	var err error
+	if tw, _ := output.(*tabwriter.Writer); tw != nil {
+		err = tw.Flush()
+	}
+
+	return err
+}
+
+// Fprint "pretty-prints" an HCL node to output.
+// It calls Config.Fprint with default settings.
+func Fprint(output io.Writer, node ast.Node) error {
+	return DefaultConfig.Fprint(output, node)
+}
+
+// Format formats src HCL and returns the result.
+func Format(src []byte) ([]byte, error) {
+	node, err := parser.Parse(src)
+	if err != nil {
+		return nil, err
+	}
+
+	var buf bytes.Buffer
+	if err := DefaultConfig.Fprint(&buf, node); err != nil {
+		return nil, err
+	}
+
+	// Add trailing newline to result
+	buf.WriteString("\n")
+	return buf.Bytes(), nil
+}
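+// Editor's usage sketch (output shown under the default two-space config;
+// the exact input is illustrative):
+//
+//	out, err := Format([]byte(`a="b"`))
+//	// out == []byte("a = \"b\"\n") on success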
diff --git a/hcl/printer/printer_test.go b/hcl/printer/printer_test.go
new file mode 100644
index 0000000..82d76c8
--- /dev/null
+++ b/hcl/printer/printer_test.go
@@ -0,0 +1,176 @@
+package printer
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"testing"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/parser/parser"
+)
+
+var update = flag.Bool("update", false, "update golden files")
+
+const (
+	dataDir = "testdata"
+)
+
+type entry struct {
+	source, golden string
+}
+
+// Use go test -update to create/update the respective golden files.
+var data = []entry{
+	{"complexhcl.input", "complexhcl.golden"},
+	{"list.input", "list.golden"},
+	{"list_comment.input", "list_comment.golden"},
+	{"comment.input", "comment.golden"},
+	{"comment_crlf.input", "comment.golden"},
+	{"comment_aligned.input", "comment_aligned.golden"},
+	{"comment_array.input", "comment_array.golden"},
+	{"comment_end_file.input", "comment_end_file.golden"},
+	{"comment_multiline_indent.input", "comment_multiline_indent.golden"},
+	{"comment_multiline_no_stanza.input", "comment_multiline_no_stanza.golden"},
+	{"comment_multiline_stanza.input", "comment_multiline_stanza.golden"},
+	{"comment_newline.input", "comment_newline.golden"},
+	{"comment_object_multi.input", "comment_object_multi.golden"},
+	{"comment_standalone.input", "comment_standalone.golden"},
+	{"empty_block.input", "empty_block.golden"},
+	{"list_of_objects.input", "list_of_objects.golden"},
+	{"multiline_string.input", "multiline_string.golden"},
+	{"object_singleline.input", "object_singleline.golden"},
+	{"object_with_heredoc.input", "object_with_heredoc.golden"},
+}
+
+func TestFiles(t *testing.T) {
+	for _, e := range data {
+		source := filepath.Join(dataDir, e.source)
+		golden := filepath.Join(dataDir, e.golden)
+		t.Run(e.source, func(t *testing.T) {
+			check(t, source, golden)
+		})
+	}
+}
+
+func check(t *testing.T, source, golden string) {
+	src, err := ioutil.ReadFile(source)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	res, err := format(src)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// update golden files if necessary
+	if *update {
+		if err := ioutil.WriteFile(golden, res, 0644); err != nil {
+			t.Error(err)
+		}
+		return
+	}
+
+	// get golden
+	gld, err := ioutil.ReadFile(golden)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// formatted source and golden must be the same
+	if err := diff(source, golden, res, gld); err != nil {
+		t.Error(err)
+		return
+	}
+}
+
+// diff compares a and b and returns an error describing the first difference.
+func diff(aname, bname string, a, b []byte) error {
+	var buf bytes.Buffer // holding long error message
+
+	// compare lengths
+	if len(a) != len(b) {
+		fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b))
+	}
+
+	// compare contents
+	line := 1
+	offs := 0 // byte offset of the start of the current line
+	for i := 0; i < len(a) && i < len(b); i++ {
+		ch := a[i]
+		if ch != b[i] {
+			fmt.Fprintf(&buf, "\n%s:%d:%d: %q", aname, line, i-offs+1, lineAt(a, offs))
+			fmt.Fprintf(&buf, "\n%s:%d:%d: %q", bname, line, i-offs+1, lineAt(b, offs))
+			fmt.Fprintf(&buf, "\n\n")
+			break
+		}
+		if ch == '\n' {
+			line++
+			offs = i + 1
+		}
+	}
+
+	if buf.Len() > 0 {
+		return errors.New(buf.String())
+	}
+	return nil
+}
+
+// format parses src, prints the corresponding AST, verifies the resulting
+// src is syntactically correct, and returns the resulting src or an error
+// if any.
+func format(src []byte) ([]byte, error) {
+	formatted, err := Format(src)
+	if err != nil {
+		return nil, err
+	}
+
+	// make sure formatted output is syntactically correct
+	if _, err := parser.Parse(formatted); err != nil {
+		return nil, fmt.Errorf("parse: %s\n%s", err, formatted)
+	}
+
+	return formatted, nil
+}
+
+// lineAt returns the line in text starting at offset offs.
+func lineAt(text []byte, offs int) []byte {
+	i := offs
+	for i < len(text) && text[i] != '\n' {
+		i++
+	}
+	return text[offs:i]
+}
+
+// TestFormatValidOutput ensures that the output of Format() can be parsed again.
+func TestFormatValidOutput(t *testing.T) {
+	cases := []string{
+		"#\x00",
+		"#\ue123t",
+		"x=//\n0y=<<_\n_\n",
+		"y=[1,//\n]",
+		"Y=<<4\n4/\n\n\n/4/@=4/\n\n\n/4000000004\r\r\n00004\n",
+		"x=<<_\n_\r\r\n_\n",
+		"X=<<-\n\r\r\n",
+	}
+
+	for _, c := range cases {
+		f, err := Format([]byte(c))
+		if err != nil {
+			// ignore these failures, not all inputs are valid HCL.
+			t.Logf("Format(%q) = %v", c, err)
+			continue
+		}
+
+		if _, err := parser.Parse(f); err != nil {
+			t.Errorf("Format(%q) = %q; Parse(%q) = %v", c, f, f, err)
+			continue
+		}
+	}
+}
diff --git a/hcl/printer/testdata/comment.golden b/hcl/printer/testdata/comment.golden
new file mode 100644
index 0000000..192c26a
--- /dev/null
+++ b/hcl/printer/testdata/comment.golden
@@ -0,0 +1,39 @@
+// A standalone comment is a comment which is not attached to any kind of node
+
+// This comes from Terraform, as a test
+variable "foo" {
+  # Standalone comment should be still here
+
+  default     = "bar"
+  description = "bar" # yooo
+}
+
+/* This is a multi line standalone
+comment*/
+
+// fatih arslan
+/* This is a developer test
+account and a multine comment */
+developer = ["fatih", "arslan"] // fatih arslan
+
+# One line here
+numbers = [1, 2] // another line here
+
+# Another comment
+variable = {
+  description = "bar" # another yooo
+
+  foo {
+    # Nested standalone
+
+    bar = "fatih"
+  }
+}
+
+// lead comment
+foo {
+  bar = "fatih" // line comment 2 
+} // line comment 3
+
+// comment
+multiline = "assignment"
diff --git a/hcl/printer/testdata/comment.input b/hcl/printer/testdata/comment.input
new file mode 100644
index 0000000..c4b29de
--- /dev/null
+++ b/hcl/printer/testdata/comment.input
@@ -0,0 +1,39 @@
+// A standalone comment is a comment which is not attached to any kind of node
+
+   // This comes from Terraform, as a test
+variable "foo" {
+	# Standalone comment should be still here
+
+       default = "bar"
+    description =     "bar" # yooo
+}
+
+/* This is a multi line standalone
+comment*/
+
+
+// fatih arslan
+/* This is a developer test
+account and a multine comment */
+developer = [     "fatih",       "arslan"] // fatih arslan
+
+# One line here
+numbers = [1,2] // another line here
+
+         # Another comment
+variable = {
+    description =     "bar" # another yooo
+    foo { 
+	# Nested standalone
+	
+        bar = "fatih"
+    } 
+}
+
+          // lead comment
+foo { 
+    bar = "fatih"       // line comment 2 
+}        // line comment 3
+
+multiline = // comment
+"assignment"
diff --git a/hcl/printer/testdata/comment_aligned.golden b/hcl/printer/testdata/comment_aligned.golden
new file mode 100644
index 0000000..6ff2150
--- /dev/null
+++ b/hcl/printer/testdata/comment_aligned.golden
@@ -0,0 +1,32 @@
+aligned {
+  # We have some aligned items below
+  foo     = "fatih"       # yoo1
+  default = "bar"         # yoo2
+  bar     = "bar and foo" # yoo3
+
+  default = {
+    bar = "example"
+  }
+
+  #deneme arslan
+  fatih = ["fatih"] # yoo4
+
+  #fatih arslan
+  fatiharslan = ["arslan"] // yoo5
+
+  default = {
+    bar = "example"
+  }
+
+  security_groups = [
+    "foo",                                # kenya 1
+    "${aws_security_group.firewall.foo}", # kenya 2
+  ]
+
+  security_groups2 = [
+    "foo",                                # kenya 1
+    "bar",                                # kenya 1.5
+    "${aws_security_group.firewall.foo}", # kenya 2
+    "foobar",                             # kenya 3
+  ]
+}
diff --git a/hcl/printer/testdata/comment_aligned.input b/hcl/printer/testdata/comment_aligned.input
new file mode 100644
index 0000000..bd43ab1
--- /dev/null
+++ b/hcl/printer/testdata/comment_aligned.input
@@ -0,0 +1,28 @@
+aligned {
+# We have some aligned items below
+   foo = "fatih" # yoo1
+   default = "bar" # yoo2
+   bar = "bar and foo" # yoo3
+   default  = {
+     bar = "example"
+   }
+  #deneme arslan
+   fatih = ["fatih"] # yoo4
+	#fatih arslan
+   fatiharslan = ["arslan"] // yoo5
+   default  = {
+     bar = "example"
+   }
+
+security_groups = [
+	"foo",    # kenya 1
+	"${aws_security_group.firewall.foo}", # kenya 2
+]
+
+security_groups2 = [
+	"foo",    # kenya 1
+	"bar",  # kenya 1.5
+	"${aws_security_group.firewall.foo}", # kenya 2
+	"foobar", # kenya 3
+]
+}
diff --git a/hcl/printer/testdata/comment_array.golden b/hcl/printer/testdata/comment_array.golden
new file mode 100644
index 0000000..e778eaf
--- /dev/null
+++ b/hcl/printer/testdata/comment_array.golden
@@ -0,0 +1,13 @@
+banana = [
+  # I really want to comment this item in the array.
+  "a",
+
+  # This as well
+  "b",
+
+  "c", # And C
+  "d",
+
+  # And another
+  "e",
+]
diff --git a/hcl/printer/testdata/comment_array.input b/hcl/printer/testdata/comment_array.input
new file mode 100644
index 0000000..e778eaf
--- /dev/null
+++ b/hcl/printer/testdata/comment_array.input
@@ -0,0 +1,13 @@
+banana = [
+  # I really want to comment this item in the array.
+  "a",
+
+  # This as well
+  "b",
+
+  "c", # And C
+  "d",
+
+  # And another
+  "e",
+]
diff --git a/hcl/printer/testdata/comment_crlf.input b/hcl/printer/testdata/comment_crlf.input
new file mode 100644
index 0000000..4955086
--- /dev/null
+++ b/hcl/printer/testdata/comment_crlf.input
@@ -0,0 +1,39 @@
+// A standalone comment is a comment which is not attached to any kind of node
+
+   // This comes from Terraform, as a test
+variable "foo" {
+	# Standalone comment should be still here
+
+       default = "bar"
+    description =     "bar" # yooo
+}
+
+/* This is a multi line standalone
+comment*/
+
+
+// fatih arslan
+/* This is a developer test
+account and a multine comment */
+developer = [     "fatih",       "arslan"] // fatih arslan
+
+# One line here
+numbers = [1,2] // another line here
+
+         # Another comment
+variable = {
+    description =     "bar" # another yooo
+    foo { 
+	# Nested standalone
+
+        bar = "fatih"
+    } 
+}
+
+          // lead comment
+foo { 
+    bar = "fatih"       // line comment 2 
+}        // line comment 3
+
+multiline = // comment
+"assignment"
diff --git a/hcl/printer/testdata/comment_end_file.golden b/hcl/printer/testdata/comment_end_file.golden
new file mode 100644
index 0000000..dbeae36
--- /dev/null
+++ b/hcl/printer/testdata/comment_end_file.golden
@@ -0,0 +1,6 @@
+resource "blah" "blah" {}
+
+//
+//
+//
+
diff --git a/hcl/printer/testdata/comment_end_file.input b/hcl/printer/testdata/comment_end_file.input
new file mode 100644
index 0000000..68c4c28
--- /dev/null
+++ b/hcl/printer/testdata/comment_end_file.input
@@ -0,0 +1,5 @@
+resource "blah" "blah" {}
+
+//
+//
+//
diff --git a/hcl/printer/testdata/comment_multiline_indent.golden b/hcl/printer/testdata/comment_multiline_indent.golden
new file mode 100644
index 0000000..74c4ccd
--- /dev/null
+++ b/hcl/printer/testdata/comment_multiline_indent.golden
@@ -0,0 +1,12 @@
+resource "provider" "resource" {
+  /*
+  SPACE_SENSITIVE_CODE = <<EOF
+yaml code:
+   foo: ""
+   bar: ""
+EOF
+  */
+  /*
+       OTHER
+                */
+}
diff --git a/hcl/printer/testdata/comment_multiline_indent.input b/hcl/printer/testdata/comment_multiline_indent.input
new file mode 100644
index 0000000..b07ac4d
--- /dev/null
+++ b/hcl/printer/testdata/comment_multiline_indent.input
@@ -0,0 +1,13 @@
+resource "provider" "resource" {
+  /*
+  SPACE_SENSITIVE_CODE = <<EOF
+yaml code:
+   foo: ""
+   bar: ""
+EOF
+  */
+
+  /*
+       OTHER
+                */
+}
diff --git a/hcl/printer/testdata/comment_multiline_no_stanza.golden b/hcl/printer/testdata/comment_multiline_no_stanza.golden
new file mode 100644
index 0000000..7ad7ca2
--- /dev/null
+++ b/hcl/printer/testdata/comment_multiline_no_stanza.golden
@@ -0,0 +1,7 @@
+# This is a multiline comment
+# That has values like this:
+#
+#     ami-abcd1234
+#
+# Do not delete this comment
+
diff --git a/hcl/printer/testdata/comment_multiline_no_stanza.input b/hcl/printer/testdata/comment_multiline_no_stanza.input
new file mode 100644
index 0000000..8b818e9
--- /dev/null
+++ b/hcl/printer/testdata/comment_multiline_no_stanza.input
@@ -0,0 +1,6 @@
+# This is a multiline comment
+# That has values like this:
+#
+#     ami-abcd1234
+#
+# Do not delete this comment
diff --git a/hcl/printer/testdata/comment_multiline_stanza.golden b/hcl/printer/testdata/comment_multiline_stanza.golden
new file mode 100644
index 0000000..e9db4f2
--- /dev/null
+++ b/hcl/printer/testdata/comment_multiline_stanza.golden
@@ -0,0 +1,10 @@
+# This is a multiline comment
+# That has values like this:
+#
+#     ami-abcd1234
+#
+# Do not delete this comment
+
+resource "aws_instance" "web" {
+  ami_id = "ami-abcd1234"
+}
diff --git a/hcl/printer/testdata/comment_multiline_stanza.input b/hcl/printer/testdata/comment_multiline_stanza.input
new file mode 100644
index 0000000..6a8b902
--- /dev/null
+++ b/hcl/printer/testdata/comment_multiline_stanza.input
@@ -0,0 +1,10 @@
+# This is a multiline comment
+# That has values like this:
+#
+#     ami-abcd1234
+#
+# Do not delete this comment
+
+resource "aws_instance" "web" {
+ami_id = "ami-abcd1234"
+}
diff --git a/hcl/printer/testdata/comment_newline.golden b/hcl/printer/testdata/comment_newline.golden
new file mode 100644
index 0000000..2162c88
--- /dev/null
+++ b/hcl/printer/testdata/comment_newline.golden
@@ -0,0 +1,3 @@
+# Hello
+# World
+
diff --git a/hcl/printer/testdata/comment_newline.input b/hcl/printer/testdata/comment_newline.input
new file mode 100644
index 0000000..aa56a98
--- /dev/null
+++ b/hcl/printer/testdata/comment_newline.input
@@ -0,0 +1,2 @@
+# Hello
+# World
diff --git a/hcl/printer/testdata/comment_object_multi.golden b/hcl/printer/testdata/comment_object_multi.golden
new file mode 100644
index 0000000..4c0f000
--- /dev/null
+++ b/hcl/printer/testdata/comment_object_multi.golden
@@ -0,0 +1,9 @@
+variable "environment" {
+  default = {}
+
+  # default {
+  #    "region" = "us-west-2"
+  #    "sg"     = "playground"
+  #    "env"    = "prod"
+  #  }
+}
diff --git a/hcl/printer/testdata/comment_object_multi.input b/hcl/printer/testdata/comment_object_multi.input
new file mode 100644
index 0000000..4c0f000
--- /dev/null
+++ b/hcl/printer/testdata/comment_object_multi.input
@@ -0,0 +1,9 @@
+variable "environment" {
+  default = {}
+
+  # default {
+  #    "region" = "us-west-2"
+  #    "sg"     = "playground"
+  #    "env"    = "prod"
+  #  }
+}
diff --git a/hcl/printer/testdata/comment_standalone.golden b/hcl/printer/testdata/comment_standalone.golden
new file mode 100644
index 0000000..3236d9e
--- /dev/null
+++ b/hcl/printer/testdata/comment_standalone.golden
@@ -0,0 +1,17 @@
+// A standalone comment 
+
+aligned {
+  # Standalone 1
+
+  a       = "bar" # yoo1
+  default = "bar" # yoo2
+
+  # Standalone 2
+}
+
+# Standalone 3
+
+numbers = [1, 2] // another line here
+
+# Standalone 4
+
diff --git a/hcl/printer/testdata/comment_standalone.input b/hcl/printer/testdata/comment_standalone.input
new file mode 100644
index 0000000..4436cb1
--- /dev/null
+++ b/hcl/printer/testdata/comment_standalone.input
@@ -0,0 +1,16 @@
+// A standalone comment 
+
+aligned {
+  # Standalone 1
+
+   a = "bar" # yoo1
+   default = "bar" # yoo2
+
+  # Standalone 2
+}
+
+  # Standalone 3
+
+numbers = [1,2] // another line here
+
+  # Standalone 4
diff --git a/hcl/printer/testdata/complexhcl.golden b/hcl/printer/testdata/complexhcl.golden
new file mode 100644
index 0000000..198c32d
--- /dev/null
+++ b/hcl/printer/testdata/complexhcl.golden
@@ -0,0 +1,54 @@
+variable "foo" {
+  default     = "bar"
+  description = "bar"
+}
+
+developer = ["fatih", "arslan"]
+
+provider "aws" {
+  access_key = "foo"
+  secret_key = "bar"
+}
+
+provider "do" {
+  api_key = "${var.foo}"
+}
+
+resource "aws_security_group" "firewall" {
+  count = 5
+}
+
+resource aws_instance "web" {
+  ami = "${var.foo}"
+
+  security_groups = [
+    "foo",
+    "${aws_security_group.firewall.foo}",
+  ]
+
+  network_interface {
+    device_index = 0
+    description  = "Main network interface"
+  }
+
+  network_interface = {
+    device_index = 1
+
+    description = <<EOF
+ANOTHER NETWORK INTERFACE
+EOF
+  }
+}
+
+resource "aws_instance" "db" {
+  security_groups = "${aws_security_group.firewall.*.id}"
+  VPC             = "foo"
+
+  depends_on = ["aws_instance.web"]
+}
+
+output "web_ip" {
+  value = <<EOF
+TUBES
+EOF
+}
diff --git a/hcl/printer/testdata/complexhcl.input b/hcl/printer/testdata/complexhcl.input
new file mode 100644
index 0000000..7123418
--- /dev/null
+++ b/hcl/printer/testdata/complexhcl.input
@@ -0,0 +1,53 @@
+variable "foo" {
+       default = "bar"
+    description =     "bar"
+}
+
+developer = [     "fatih",       "arslan"]
+
+provider "aws" {
+                            access_key ="foo"
+     secret_key =         "bar"
+}
+
+          provider "do" {
+  api_key = "${var.foo}"
+}
+
+resource "aws_security_group" "firewall" {
+          count = 5
+      }
+
+    resource aws_instance "web" {
+      ami = "${var.foo}"
+           security_groups = [
+            "foo",
+            "${aws_security_group.firewall.foo}"
+                  ]
+
+           network_interface {
+                 device_index = 0
+                    description = "Main network interface"
+        }
+
+           network_interface = {
+                 device_index = 1
+                    description = <<EOF
+ANOTHER NETWORK INTERFACE
+EOF
+        }
+    }
+
+resource "aws_instance" "db" {
+        security_groups = "${aws_security_group.firewall.*.id}"
+    VPC = "foo"
+
+    depends_on = ["aws_instance.web"]
+}
+
+output "web_ip" {
+
+    value=<<EOF
+TUBES
+EOF
+}
diff --git a/hcl/printer/testdata/empty_block.golden b/hcl/printer/testdata/empty_block.golden
new file mode 100644
index 0000000..4ff1cb3
--- /dev/null
+++ b/hcl/printer/testdata/empty_block.golden
@@ -0,0 +1,12 @@
+variable "foo" {}
+variable "foo" {}
+
+variable "foo" {
+  # Standalone comment should be still here
+}
+
+foo {}
+
+foo {
+  bar = "mssola"
+}
diff --git a/hcl/printer/testdata/empty_block.input b/hcl/printer/testdata/empty_block.input
new file mode 100644
index 0000000..627bf3e
--- /dev/null
+++ b/hcl/printer/testdata/empty_block.input
@@ -0,0 +1,14 @@
+variable "foo" {}
+variable "foo" {
+}
+
+variable "foo" {
+	# Standalone comment should be still here
+}
+
+foo {
+}
+
+foo {
+  bar = "mssola"
+}
diff --git a/hcl/printer/testdata/list.golden b/hcl/printer/testdata/list.golden
new file mode 100644
index 0000000..6894b44
--- /dev/null
+++ b/hcl/printer/testdata/list.golden
@@ -0,0 +1,46 @@
+foo = ["fatih", "arslan"]
+
+foo = ["bar", "qaz"]
+
+foo = [
+  "zeynep",
+  "arslan",
+]
+
+foo = [
+  "fatih",
+  "zeynep",
+  "arslan",
+]
+
+foo = [
+  "vim-go",
+  "golang",
+  "hcl",
+]
+
+foo = []
+
+foo = [1, 2, 3, 4]
+
+foo = [
+  "kenya",
+  "ethiopia",
+  "columbia",
+]
+
+foo = [
+  <<EOS
+one
+EOS
+  ,
+  <<EOS
+two
+EOS
+  ,
+]
+
+foo = [<<EOS
+one
+EOS
+]
diff --git a/hcl/printer/testdata/list.input b/hcl/printer/testdata/list.input
new file mode 100644
index 0000000..f55a382
--- /dev/null
+++ b/hcl/printer/testdata/list.input
@@ -0,0 +1,37 @@
+foo = ["fatih", "arslan"           ]
+
+foo = [        "bar", "qaz", ]
+
+foo = [             "zeynep",
+"arslan", ]
+
+foo = ["fatih", "zeynep",
+"arslan", ]
+
+foo = [
+	"vim-go",
+	"golang", "hcl"]
+
+foo = []
+
+foo = [1,   2,3,       4]
+
+foo = [
+	"kenya",        "ethiopia",
+	"columbia"]
+
+foo = [
+    <<EOS
+one
+EOS
+,
+    <<EOS
+two
+EOS
+,
+    ]
+
+foo = [<<EOS
+one
+EOS
+    ]
diff --git a/hcl/printer/testdata/list_comment.golden b/hcl/printer/testdata/list_comment.golden
new file mode 100644
index 0000000..35a848f
--- /dev/null
+++ b/hcl/printer/testdata/list_comment.golden
@@ -0,0 +1,13 @@
+foo = [
+  1, # Hello
+  2,
+]
+
+foo = [
+  1, # Hello
+  2, # World
+]
+
+foo = [
+  1, # Hello
+]
diff --git a/hcl/printer/testdata/list_comment.input b/hcl/printer/testdata/list_comment.input
new file mode 100644
index 0000000..c56aef2
--- /dev/null
+++ b/hcl/printer/testdata/list_comment.input
@@ -0,0 +1,9 @@
+foo = [1, # Hello
+2]
+
+foo = [1, # Hello
+2, # World
+]
+
+foo = [1, # Hello
+]
diff --git a/hcl/printer/testdata/list_of_objects.golden b/hcl/printer/testdata/list_of_objects.golden
new file mode 100644
index 0000000..401ded6
--- /dev/null
+++ b/hcl/printer/testdata/list_of_objects.golden
@@ -0,0 +1,10 @@
+list_of_objects = [
+  {
+    key1 = "value1"
+    key2 = "value2"
+  },
+  {
+    key3 = "value3"
+    key4 = "value4"
+  },
+]
diff --git a/hcl/printer/testdata/list_of_objects.input b/hcl/printer/testdata/list_of_objects.input
new file mode 100644
index 0000000..f2adcf0
--- /dev/null
+++ b/hcl/printer/testdata/list_of_objects.input
@@ -0,0 +1,10 @@
+list_of_objects = [
+    {
+        key1 = "value1"
+        key2 = "value2"
+    },
+    {
+        key3 = "value3"
+        key4 = "value4"
+    }
+]
\ No newline at end of file
diff --git a/hcl/printer/testdata/multiline_string.golden b/hcl/printer/testdata/multiline_string.golden
new file mode 100644
index 0000000..3d10c74
--- /dev/null
+++ b/hcl/printer/testdata/multiline_string.golden
@@ -0,0 +1,7 @@
+resource "null_resource" "some_command" {
+  provisioner "local-exec" {
+    command = "${echo '
+some newlines
+and additonal output'}"
+  }
+}
diff --git a/hcl/printer/testdata/multiline_string.input b/hcl/printer/testdata/multiline_string.input
new file mode 100644
index 0000000..3d10c74
--- /dev/null
+++ b/hcl/printer/testdata/multiline_string.input
@@ -0,0 +1,7 @@
+resource "null_resource" "some_command" {
+  provisioner "local-exec" {
+    command = "${echo '
+some newlines
+and additonal output'}"
+  }
+}
diff --git a/hcl/printer/testdata/object_singleline.golden b/hcl/printer/testdata/object_singleline.golden
new file mode 100644
index 0000000..c3d9147
--- /dev/null
+++ b/hcl/printer/testdata/object_singleline.golden
@@ -0,0 +1,26 @@
+variable "foo" {}
+variable "bar" {}
+variable "baz" {}
+
+variable "qux" {}
+
+variable "foo" {
+  foo = "bar"
+}
+
+variable "foo" {}
+
+# lead comment
+variable "bar" {}
+
+variable "foo" {
+  default = "bar"
+}
+
+variable "bar" {}
+
+# Purposeful newline check below:
+
+variable "foo" {}
+
+variable "purposeful-newline" {}
diff --git a/hcl/printer/testdata/object_singleline.input b/hcl/printer/testdata/object_singleline.input
new file mode 100644
index 0000000..7b34834
--- /dev/null
+++ b/hcl/printer/testdata/object_singleline.input
@@ -0,0 +1,19 @@
+variable "foo" {}
+variable "bar" {}
+variable "baz" {}
+
+variable "qux" {}
+variable "foo" { foo = "bar" }
+
+variable "foo" {}
+# lead comment
+variable "bar" {}
+
+variable "foo" { default = "bar" }
+variable "bar" {}
+
+# Purposeful newline check below:
+
+variable "foo" {}
+
+variable "purposeful-newline" {}
diff --git a/hcl/printer/testdata/object_with_heredoc.golden b/hcl/printer/testdata/object_with_heredoc.golden
new file mode 100644
index 0000000..a271d28
--- /dev/null
+++ b/hcl/printer/testdata/object_with_heredoc.golden
@@ -0,0 +1,7 @@
+obj {
+  foo = [<<EOF
+        TEXT!
+!!EOF
+EOF
+  ]
+}
diff --git a/hcl/printer/testdata/object_with_heredoc.input b/hcl/printer/testdata/object_with_heredoc.input
new file mode 100644
index 0000000..a35f24f
--- /dev/null
+++ b/hcl/printer/testdata/object_with_heredoc.input
@@ -0,0 +1,7 @@
+obj {
+    foo = [<<EOF
+        TEXT!
+!!EOF
+EOF
+    ]
+}
diff --git a/hcl/scanner/scanner.go b/hcl/scanner/scanner.go
new file mode 100644
index 0000000..3e96cd2
--- /dev/null
+++ b/hcl/scanner/scanner.go
@@ -0,0 +1,652 @@
+// Package scanner implements a scanner for HCL (HashiCorp Configuration
+// Language) source text.
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"regexp"
+	"unicode"
+	"unicode/utf8"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+	buf *bytes.Buffer // Source buffer for advancing and scanning
+	src []byte        // Source buffer for immutable access
+
+	// Source Position
+	srcPos  token.Pos // current position
+	prevPos token.Pos // previous position, used for peek() method
+
+	lastCharLen int // length of last character in bytes
+	lastLineLen int // length of last line in characters (for correct column reporting)
+
+	tokStart int // token text start position
+	tokEnd   int // token text end position
+
+	// Error is called for each error encountered. If no Error
+	// function is set, the error is reported to os.Stderr.
+	Error func(pos token.Pos, msg string)
+
+	// ErrorCount is incremented by one for each error encountered.
+	ErrorCount int
+
+	// tokPos is the start position of the most recently scanned token; set by
+	// Scan. The Filename field is always left untouched by the Scanner. If
+	// an error is reported (via Error) and Position is invalid, the scanner is
+	// not inside a token.
+	tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+	// even though we accept a src, we read from an io.Reader compatible type
+	// (*bytes.Buffer), so in the future we could easily switch to a streaming
+	// read.
+	b := bytes.NewBuffer(src)
+	s := &Scanner{
+		buf: b,
+		src: src,
+	}
+
+	// srcPos.Line always starts at 1
+	s.srcPos.Line = 1
+	return s
+}
+
+// next reads the next rune from the buffered reader. It returns eof (rune(0))
+// if an error occurs or io.EOF is reached.
+func (s *Scanner) next() rune {
+	ch, size, err := s.buf.ReadRune()
+	if err != nil {
+		// advance for error reporting
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		return eof
+	}
+
+	// remember last position
+	s.prevPos = s.srcPos
+
+	s.srcPos.Column++
+	s.lastCharLen = size
+	s.srcPos.Offset += size
+
+	if ch == utf8.RuneError && size == 1 {
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
+	if ch == '\n' {
+		s.srcPos.Line++
+		s.lastLineLen = s.srcPos.Column
+		s.srcPos.Column = 0
+	}
+
+	if ch == '\x00' {
+		s.err("unexpected null character (0x00)")
+		return eof
+	}
+
+	if ch == '\uE123' {
+		s.err("unicode code point U+E123 reserved for internal use")
+		return utf8.RuneError
+	}
+
+	// debug
+	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+	return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+	if err := s.buf.UnreadRune(); err != nil {
+		panic(err) // this is a user fault, we should catch it
+	}
+	s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+	peek, _, err := s.buf.ReadRune()
+	if err != nil {
+		return eof
+	}
+
+	s.buf.UnreadRune()
+	return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() moves the offset by one rune (by its
+	// size in bytes, actually), but we are interested in the starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		tok = token.IDENT
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '#', '/':
+			tok = token.COMMENT
+			s.scanComment(ch)
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '<':
+			tok = token.HEREDOC
+			s.scanHeredoc()
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case '=':
+			tok = token.ASSIGN
+		case '+':
+			tok = token.ADD
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				tok = token.SUB
+			}
+		default:
+			s.err("illegal char")
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+func (s *Scanner) scanComment(ch rune) {
+	// single line comments
+	if ch == '#' || (ch == '/' && s.peek() != '*') {
+		if ch == '/' && s.peek() != '/' {
+			s.err("expected '/' for comment")
+			return
+		}
+
+		ch = s.next()
+		for ch != '\n' && ch >= 0 && ch != eof {
+			ch = s.next()
+		}
+		if ch != eof && ch >= 0 {
+			s.unread()
+		}
+		return
+	}
+
+	// be sure we get the character after /*. This allows us to find comments
+	// that are not terminated
+	if ch == '/' {
+		s.next()
+		ch = s.next() // read character after "/*"
+	}
+
+	// look for /* - style comments
+	for {
+		if ch < 0 || ch == eof {
+			s.err("comment not terminated")
+			break
+		}
+
+		ch0 := ch
+		ch = s.next()
+		if ch0 == '*' && ch == '/' {
+			break
+		}
+	}
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	if ch == '0' {
+		// check for hexadecimal, octal or float
+		ch = s.next()
+		if ch == 'x' || ch == 'X' {
+			// hexadecimal
+			ch = s.next()
+			found := false
+			for isHexadecimal(ch) {
+				ch = s.next()
+				found = true
+			}
+
+			if !found {
+				s.err("illegal hexadecimal number")
+			}
+
+			if ch != eof {
+				s.unread()
+			}
+
+			return token.NUMBER
+		}
+
+		// now it's either something like 0421 (octal) or 0.1231 (float)
+		illegalOctal := false
+		for isDecimal(ch) {
+			ch = s.next()
+			if ch == '8' || ch == '9' {
+				// this is just a possibility. For example 0159 is illegal, but
+				// 0159.23 is valid. So we mark a possible illegal octal. If
+				// the next character is not a period, we'll print the error.
+				illegalOctal = true
+			}
+		}
+
+		if ch == 'e' || ch == 'E' {
+			ch = s.scanExponent(ch)
+			return token.FLOAT
+		}
+
+		if ch == '.' {
+			ch = s.scanFraction(ch)
+
+			if ch == 'e' || ch == 'E' {
+				ch = s.next()
+				ch = s.scanExponent(ch)
+			}
+			return token.FLOAT
+		}
+
+		if illegalOctal {
+			s.err("illegal octal number")
+		}
+
+		if ch != eof {
+			s.unread()
+		}
+		return token.NUMBER
+	}
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+	// Scan the second '<' in example: '<<EOF'
+	if s.next() != '<' {
+		s.err("heredoc expected second '<', didn't see it")
+		return
+	}
+
+	// Get the original offset so we can read just the heredoc ident
+	offs := s.srcPos.Offset
+
+	// Scan the identifier
+	ch := s.next()
+
+	// Indented heredoc syntax
+	if ch == '-' {
+		ch = s.next()
+	}
+
+	for isLetter(ch) || isDigit(ch) {
+		ch = s.next()
+	}
+
+	// If we reached an EOF then that is not good
+	if ch == eof {
+		s.err("heredoc not terminated")
+		return
+	}
+
+	// Ignore the '\r' in Windows line endings
+	if ch == '\r' {
+		if s.peek() == '\n' {
+			ch = s.next()
+		}
+	}
+
+	// If we didn't reach a newline then that is also not good
+	if ch != '\n' {
+		s.err("invalid characters in heredoc anchor")
+		return
+	}
+
+	// Read the identifier
+	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
+	if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') {
+		s.err("zero-length heredoc anchor")
+		return
+	}
+
+	var identRegexp *regexp.Regexp
+	if identBytes[0] == '-' {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:]))
+	} else {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes))
+	}
+
+	// Read the actual string value
+	lineStart := s.srcPos.Offset
+	for {
+		ch := s.next()
+
+		// Special newline handling.
+		if ch == '\n' {
+			// Math is fast, so we first compare the byte counts to see if we have a chance
+			// of seeing the same identifier - if the length is less than the number of bytes
+			// in the identifier, this cannot be a valid terminator.
+			lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
+			if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+				break
+			}
+
+			// Not an anchor match, record the start of a new line
+			lineStart = s.srcPos.Offset
+		}
+
+		if ch == eof {
+			s.err("heredoc not terminated")
+			return
+		}
+	}
+
+	return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' && braces == 0 {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans a rune with the given base up to n times. For example, an
+// octal escape such as \123 would result in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	start := n
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		if ch == eof {
+			// If we see an EOF, we halt any more scanning of digits
+			// immediately.
+			break
+		}
+
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	if n != start && ch != eof {
+		// we scanned all digits, put the last non digit char back,
+		// only if we read anything at all
+		s.unread()
+	}
+
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got identifier, put back latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, it prints the error to os.Stderr by default
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
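
For orientation, a minimal sketch of driving the Scanner defined above: construct it with New, call Scan until token.EOF, and check ErrorCount afterwards. The scanner's google3 import path is assumed to mirror the token package's path used in this file.

    package main

    import (
        "fmt"

        "google3/third_party/golang/hashicorp/hcl/hcl/scanner/scanner"
        "google3/third_party/golang/hashicorp/hcl/hcl/token/token"
    )

    func main() {
        s := scanner.New([]byte(`count = 5 // a comment`))
        for {
            tok := s.Scan()
            if tok.Type == token.EOF {
                break
            }
            fmt.Printf("%s %q at %s\n", tok.Type, tok.Text, tok.Pos)
        }
        // Scan never fails outright; errors are counted (and reported via
        // the optional Error callback) as tokens are produced.
        if s.ErrorCount > 0 {
            fmt.Printf("%d scan errors\n", s.ErrorCount)
        }
    }
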
diff --git a/hcl/scanner/scanner_test.go b/hcl/scanner/scanner_test.go
new file mode 100644
index 0000000..4c42227
--- /dev/null
+++ b/hcl/scanner/scanner_test.go
@@ -0,0 +1,642 @@
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"testing"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+)
+
+var f100 = strings.Repeat("f", 100)
+
+type tokenPair struct {
+	tok  token.Type
+	text string
+}
+
+var tokenLists = map[string][]tokenPair{
+	"comment": []tokenPair{
+		{token.COMMENT, "//"},
+		{token.COMMENT, "////"},
+		{token.COMMENT, "// comment"},
+		{token.COMMENT, "// /* comment */"},
+		{token.COMMENT, "// // comment //"},
+		{token.COMMENT, "//" + f100},
+		{token.COMMENT, "#"},
+		{token.COMMENT, "##"},
+		{token.COMMENT, "# comment"},
+		{token.COMMENT, "# /* comment */"},
+		{token.COMMENT, "# # comment #"},
+		{token.COMMENT, "#" + f100},
+		{token.COMMENT, "/**/"},
+		{token.COMMENT, "/***/"},
+		{token.COMMENT, "/* comment */"},
+		{token.COMMENT, "/* // comment */"},
+		{token.COMMENT, "/* /* comment */"},
+		{token.COMMENT, "/*\n comment\n*/"},
+		{token.COMMENT, "/*" + f100 + "*/"},
+	},
+	"operator": []tokenPair{
+		{token.LBRACK, "["},
+		{token.LBRACE, "{"},
+		{token.COMMA, ","},
+		{token.PERIOD, "."},
+		{token.RBRACK, "]"},
+		{token.RBRACE, "}"},
+		{token.ASSIGN, "="},
+		{token.ADD, "+"},
+		{token.SUB, "-"},
+	},
+	"bool": []tokenPair{
+		{token.BOOL, "true"},
+		{token.BOOL, "false"},
+	},
+	"ident": []tokenPair{
+		{token.IDENT, "a"},
+		{token.IDENT, "a0"},
+		{token.IDENT, "foobar"},
+		{token.IDENT, "foo-bar"},
+		{token.IDENT, "abc123"},
+		{token.IDENT, "LGTM"},
+		{token.IDENT, "_"},
+		{token.IDENT, "_abc123"},
+		{token.IDENT, "abc123_"},
+		{token.IDENT, "_abc_123_"},
+		{token.IDENT, "_äöü"},
+		{token.IDENT, "_本"},
+		{token.IDENT, "äöü"},
+		{token.IDENT, "本"},
+		{token.IDENT, "a۰۱۸"},
+		{token.IDENT, "foo६४"},
+		{token.IDENT, "bar9876"},
+	},
+	"heredoc": []tokenPair{
+		{token.HEREDOC, "<<EOF\nhello\nworld\nEOF"},
+		{token.HEREDOC, "<<EOF123\nhello\nworld\nEOF123"},
+	},
+	"string": []tokenPair{
+		{token.STRING, `" "`},
+		{token.STRING, `"a"`},
+		{token.STRING, `"本"`},
+		{token.STRING, `"${file("foo")}"`},
+		{token.STRING, `"${file(\"foo\")}"`},
+		{token.STRING, `"\a"`},
+		{token.STRING, `"\b"`},
+		{token.STRING, `"\f"`},
+		{token.STRING, `"\n"`},
+		{token.STRING, `"\r"`},
+		{token.STRING, `"\t"`},
+		{token.STRING, `"\v"`},
+		{token.STRING, `"\""`},
+		{token.STRING, `"\000"`},
+		{token.STRING, `"\777"`},
+		{token.STRING, `"\x00"`},
+		{token.STRING, `"\xff"`},
+		{token.STRING, `"\u0000"`},
+		{token.STRING, `"\ufA16"`},
+		{token.STRING, `"\U00000000"`},
+		{token.STRING, `"\U0000ffAB"`},
+		{token.STRING, `"` + f100 + `"`},
+	},
+	"number": []tokenPair{
+		{token.NUMBER, "0"},
+		{token.NUMBER, "1"},
+		{token.NUMBER, "9"},
+		{token.NUMBER, "42"},
+		{token.NUMBER, "1234567890"},
+		{token.NUMBER, "00"},
+		{token.NUMBER, "01"},
+		{token.NUMBER, "07"},
+		{token.NUMBER, "042"},
+		{token.NUMBER, "01234567"},
+		{token.NUMBER, "0x0"},
+		{token.NUMBER, "0x1"},
+		{token.NUMBER, "0xf"},
+		{token.NUMBER, "0x42"},
+		{token.NUMBER, "0x123456789abcDEF"},
+		{token.NUMBER, "0x" + f100},
+		{token.NUMBER, "0X0"},
+		{token.NUMBER, "0X1"},
+		{token.NUMBER, "0XF"},
+		{token.NUMBER, "0X42"},
+		{token.NUMBER, "0X123456789abcDEF"},
+		{token.NUMBER, "0X" + f100},
+		{token.NUMBER, "-0"},
+		{token.NUMBER, "-1"},
+		{token.NUMBER, "-9"},
+		{token.NUMBER, "-42"},
+		{token.NUMBER, "-1234567890"},
+		{token.NUMBER, "-00"},
+		{token.NUMBER, "-01"},
+		{token.NUMBER, "-07"},
+		{token.NUMBER, "-29"},
+		{token.NUMBER, "-042"},
+		{token.NUMBER, "-01234567"},
+		{token.NUMBER, "-0x0"},
+		{token.NUMBER, "-0x1"},
+		{token.NUMBER, "-0xf"},
+		{token.NUMBER, "-0x42"},
+		{token.NUMBER, "-0x123456789abcDEF"},
+		{token.NUMBER, "-0x" + f100},
+		{token.NUMBER, "-0X0"},
+		{token.NUMBER, "-0X1"},
+		{token.NUMBER, "-0XF"},
+		{token.NUMBER, "-0X42"},
+		{token.NUMBER, "-0X123456789abcDEF"},
+		{token.NUMBER, "-0X" + f100},
+	},
+	"float": []tokenPair{
+		{token.FLOAT, "0."},
+		{token.FLOAT, "1."},
+		{token.FLOAT, "42."},
+		{token.FLOAT, "01234567890."},
+		{token.FLOAT, ".0"},
+		{token.FLOAT, ".1"},
+		{token.FLOAT, ".42"},
+		{token.FLOAT, ".0123456789"},
+		{token.FLOAT, "0.0"},
+		{token.FLOAT, "1.0"},
+		{token.FLOAT, "42.0"},
+		{token.FLOAT, "01234567890.0"},
+		{token.FLOAT, "0e0"},
+		{token.FLOAT, "1e0"},
+		{token.FLOAT, "42e0"},
+		{token.FLOAT, "01234567890e0"},
+		{token.FLOAT, "0E0"},
+		{token.FLOAT, "1E0"},
+		{token.FLOAT, "42E0"},
+		{token.FLOAT, "01234567890E0"},
+		{token.FLOAT, "0e+10"},
+		{token.FLOAT, "1e-10"},
+		{token.FLOAT, "42e+10"},
+		{token.FLOAT, "01234567890e-10"},
+		{token.FLOAT, "0E+10"},
+		{token.FLOAT, "1E-10"},
+		{token.FLOAT, "42E+10"},
+		{token.FLOAT, "01234567890E-10"},
+		{token.FLOAT, "01.8e0"},
+		{token.FLOAT, "1.4e0"},
+		{token.FLOAT, "42.2e0"},
+		{token.FLOAT, "01234567890.12e0"},
+		{token.FLOAT, "0.E0"},
+		{token.FLOAT, "1.12E0"},
+		{token.FLOAT, "42.123E0"},
+		{token.FLOAT, "01234567890.213E0"},
+		{token.FLOAT, "0.2e+10"},
+		{token.FLOAT, "1.2e-10"},
+		{token.FLOAT, "42.54e+10"},
+		{token.FLOAT, "01234567890.98e-10"},
+		{token.FLOAT, "0.1E+10"},
+		{token.FLOAT, "1.1E-10"},
+		{token.FLOAT, "42.1E+10"},
+		{token.FLOAT, "01234567890.1E-10"},
+		{token.FLOAT, "-0.0"},
+		{token.FLOAT, "-1.0"},
+		{token.FLOAT, "-42.0"},
+		{token.FLOAT, "-01234567890.0"},
+		{token.FLOAT, "-0e0"},
+		{token.FLOAT, "-1e0"},
+		{token.FLOAT, "-42e0"},
+		{token.FLOAT, "-01234567890e0"},
+		{token.FLOAT, "-0E0"},
+		{token.FLOAT, "-1E0"},
+		{token.FLOAT, "-42E0"},
+		{token.FLOAT, "-01234567890E0"},
+		{token.FLOAT, "-0e+10"},
+		{token.FLOAT, "-1e-10"},
+		{token.FLOAT, "-42e+10"},
+		{token.FLOAT, "-01234567890e-10"},
+		{token.FLOAT, "-0E+10"},
+		{token.FLOAT, "-1E-10"},
+		{token.FLOAT, "-42E+10"},
+		{token.FLOAT, "-01234567890E-10"},
+		{token.FLOAT, "-01.8e0"},
+		{token.FLOAT, "-1.4e0"},
+		{token.FLOAT, "-42.2e0"},
+		{token.FLOAT, "-01234567890.12e0"},
+		{token.FLOAT, "-0.E0"},
+		{token.FLOAT, "-1.12E0"},
+		{token.FLOAT, "-42.123E0"},
+		{token.FLOAT, "-01234567890.213E0"},
+		{token.FLOAT, "-0.2e+10"},
+		{token.FLOAT, "-1.2e-10"},
+		{token.FLOAT, "-42.54e+10"},
+		{token.FLOAT, "-01234567890.98e-10"},
+		{token.FLOAT, "-0.1E+10"},
+		{token.FLOAT, "-1.1E-10"},
+		{token.FLOAT, "-42.1E+10"},
+		{token.FLOAT, "-01234567890.1E-10"},
+	},
+}
+
+var orderedTokenLists = []string{
+	"comment",
+	"operator",
+	"bool",
+	"ident",
+	"heredoc",
+	"string",
+	"number",
+	"float",
+}
+
+func TestPosition(t *testing.T) {
+	// create artificial source code
+	buf := new(bytes.Buffer)
+
+	for _, listName := range orderedTokenLists {
+		for _, ident := range tokenLists[listName] {
+			fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text)
+		}
+	}
+
+	s := New(buf.Bytes())
+
+	pos := token.Pos{"", 4, 1, 5}
+	s.Scan()
+	for _, listName := range orderedTokenLists {
+
+		for _, k := range tokenLists[listName] {
+			curPos := s.tokPos
+			// fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column)
+
+			if curPos.Offset != pos.Offset {
+				t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text)
+			}
+			if curPos.Line != pos.Line {
+				t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text)
+			}
+			if curPos.Column != pos.Column {
+				t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text)
+			}
+			pos.Offset += 4 + len(k.text) + 1     // 4 tabs + token bytes + newline
+			pos.Line += countNewlines(k.text) + 1 // each token is on a new line
+			s.Scan()
+		}
+	}
+	// make sure there were no token-internal errors reported by scanner
+	if s.ErrorCount != 0 {
+		t.Errorf("%d errors", s.ErrorCount)
+	}
+}
+
+func TestNullChar(t *testing.T) {
+	s := New([]byte("\"\\0"))
+	s.Scan() // Used to panic
+}
+
+func TestComment(t *testing.T) {
+	testTokenList(t, tokenLists["comment"])
+}
+
+func TestOperator(t *testing.T) {
+	testTokenList(t, tokenLists["operator"])
+}
+
+func TestBool(t *testing.T) {
+	testTokenList(t, tokenLists["bool"])
+}
+
+func TestIdent(t *testing.T) {
+	testTokenList(t, tokenLists["ident"])
+}
+
+func TestString(t *testing.T) {
+	testTokenList(t, tokenLists["string"])
+}
+
+func TestNumber(t *testing.T) {
+	testTokenList(t, tokenLists["number"])
+}
+
+func TestFloat(t *testing.T) {
+	testTokenList(t, tokenLists["float"])
+}
+
+func TestWindowsLineEndings(t *testing.T) {
+	hcl := `// This should have Windows line endings
+resource "aws_instance" "foo" {
+    user_data=<<HEREDOC
+    test script
+HEREDOC
+}`
+	hclWindowsEndings := strings.Replace(hcl, "\n", "\r\n", -1)
+
+	literals := []struct {
+		tokenType token.Type
+		literal   string
+	}{
+		{token.COMMENT, "// This should have Windows line endings\r"},
+		{token.IDENT, `resource`},
+		{token.STRING, `"aws_instance"`},
+		{token.STRING, `"foo"`},
+		{token.LBRACE, `{`},
+		{token.IDENT, `user_data`},
+		{token.ASSIGN, `=`},
+		{token.HEREDOC, "<<HEREDOC\r\n    test script\r\nHEREDOC\r\n"},
+		{token.RBRACE, `}`},
+	}
+
+	s := New([]byte(hclWindowsEndings))
+	for _, l := range literals {
+		tok := s.Scan()
+
+		if l.tokenType != tok.Type {
+			t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
+		}
+
+		if l.literal != tok.Text {
+			t.Errorf("got:\n%v\nwant:\n%v\n", []byte(tok.Text), []byte(l.literal))
+		}
+	}
+}
+
+func TestRealExample(t *testing.T) {
+	complexHCL := `// This comes from Terraform, as a test
+	variable "foo" {
+	    default = "bar"
+	    description = "bar"
+	}
+
+	provider "aws" {
+	  access_key = "foo"
+	  secret_key = "${replace(var.foo, ".", "\\.")}"
+	}
+
+	resource "aws_security_group" "firewall" {
+	    count = 5
+	}
+
+	resource aws_instance "web" {
+	    ami = "${var.foo}"
+	    security_groups = [
+	        "foo",
+	        "${aws_security_group.firewall.foo}"
+	    ]
+
+	    network_interface {
+	        device_index = 0
+	        description = <<EOF
+Main interface
+EOF
+	    }
+
+		network_interface {
+	        device_index = 1
+	        description = <<-EOF
+			Outer text
+				Indented text
+			EOF
+		}
+	}`
+
+	literals := []struct {
+		tokenType token.Type
+		literal   string
+	}{
+		{token.COMMENT, `// This comes from Terraform, as a test`},
+		{token.IDENT, `variable`},
+		{token.STRING, `"foo"`},
+		{token.LBRACE, `{`},
+		{token.IDENT, `default`},
+		{token.ASSIGN, `=`},
+		{token.STRING, `"bar"`},
+		{token.IDENT, `description`},
+		{token.ASSIGN, `=`},
+		{token.STRING, `"bar"`},
+		{token.RBRACE, `}`},
+		{token.IDENT, `provider`},
+		{token.STRING, `"aws"`},
+		{token.LBRACE, `{`},
+		{token.IDENT, `access_key`},
+		{token.ASSIGN, `=`},
+		{token.STRING, `"foo"`},
+		{token.IDENT, `secret_key`},
+		{token.ASSIGN, `=`},
+		{token.STRING, `"${replace(var.foo, ".", "\\.")}"`},
+		{token.RBRACE, `}`},
+		{token.IDENT, `resource`},
+		{token.STRING, `"aws_security_group"`},
+		{token.STRING, `"firewall"`},
+		{token.LBRACE, `{`},
+		{token.IDENT, `count`},
+		{token.ASSIGN, `=`},
+		{token.NUMBER, `5`},
+		{token.RBRACE, `}`},
+		{token.IDENT, `resource`},
+		{token.IDENT, `aws_instance`},
+		{token.STRING, `"web"`},
+		{token.LBRACE, `{`},
+		{token.IDENT, `ami`},
+		{token.ASSIGN, `=`},
+		{token.STRING, `"${var.foo}"`},
+		{token.IDENT, `security_groups`},
+		{token.ASSIGN, `=`},
+		{token.LBRACK, `[`},
+		{token.STRING, `"foo"`},
+		{token.COMMA, `,`},
+		{token.STRING, `"${aws_security_group.firewall.foo}"`},
+		{token.RBRACK, `]`},
+		{token.IDENT, `network_interface`},
+		{token.LBRACE, `{`},
+		{token.IDENT, `device_index`},
+		{token.ASSIGN, `=`},
+		{token.NUMBER, `0`},
+		{token.IDENT, `description`},
+		{token.ASSIGN, `=`},
+		{token.HEREDOC, "<<EOF\nMain interface\nEOF\n"},
+		{token.RBRACE, `}`},
+		{token.IDENT, `network_interface`},
+		{token.LBRACE, `{`},
+		{token.IDENT, `device_index`},
+		{token.ASSIGN, `=`},
+		{token.NUMBER, `1`},
+		{token.IDENT, `description`},
+		{token.ASSIGN, `=`},
+		{token.HEREDOC, "<<-EOF\n\t\t\tOuter text\n\t\t\t\tIndented text\n\t\t\tEOF\n"},
+		{token.RBRACE, `}`},
+		{token.RBRACE, `}`},
+		{token.EOF, ``},
+	}
+
+	s := New([]byte(complexHCL))
+	for _, l := range literals {
+		tok := s.Scan()
+		if l.tokenType != tok.Type {
+			t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
+		}
+
+		if l.literal != tok.Text {
+			t.Errorf("got:\n%+v\n%s\n want:\n%+v\n%s\n", []byte(tok.String()), tok, []byte(l.literal), l.literal)
+		}
+	}
+
+}
+
+func TestScan_crlf(t *testing.T) {
+	complexHCL := "foo {\r\n  bar = \"baz\"\r\n}\r\n"
+
+	literals := []struct {
+		tokenType token.Type
+		literal   string
+	}{
+		{token.IDENT, `foo`},
+		{token.LBRACE, `{`},
+		{token.IDENT, `bar`},
+		{token.ASSIGN, `=`},
+		{token.STRING, `"baz"`},
+		{token.RBRACE, `}`},
+		{token.EOF, ``},
+	}
+
+	s := New([]byte(complexHCL))
+	for _, l := range literals {
+		tok := s.Scan()
+		if l.tokenType != tok.Type {
+			t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
+		}
+
+		if l.literal != tok.Text {
+			t.Errorf("got:\n%+v\n%s\n want:\n%+v\n%s\n", []byte(tok.String()), tok, []byte(l.literal), l.literal)
+		}
+	}
+
+}
+
+func TestError(t *testing.T) {
+	testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
+	testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
+	testError(t, "\uE123", "1:1", "unicode code point U+E123 reserved for internal use", token.ILLEGAL)
+
+	testError(t, "ab\x80", "1:3", "illegal UTF-8 encoding", token.IDENT)
+	testError(t, "abc\xff", "1:4", "illegal UTF-8 encoding", token.IDENT)
+	testError(t, "ab\x00", "1:3", "unexpected null character (0x00)", token.IDENT)
+	testError(t, "ab\x00\n", "1:3", "unexpected null character (0x00)", token.IDENT)
+
+	testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING)
+	testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING)
+
+	testError(t, `01238`, "1:6", "illegal octal number", token.NUMBER)
+	testError(t, `01238123`, "1:9", "illegal octal number", token.NUMBER)
+	testError(t, `0x`, "1:3", "illegal hexadecimal number", token.NUMBER)
+	testError(t, `0xg`, "1:3", "illegal hexadecimal number", token.NUMBER)
+	testError(t, `'aa'`, "1:1", "illegal char", token.ILLEGAL)
+
+	testError(t, `"`, "1:2", "literal not terminated", token.STRING)
+	testError(t, `"abc`, "1:5", "literal not terminated", token.STRING)
+	testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING)
+	testError(t, `"${abc`+"\n", "2:1", "literal not terminated", token.STRING)
+	testError(t, `/*/`, "1:4", "comment not terminated", token.COMMENT)
+	testError(t, `/foo`, "1:1", "expected '/' for comment", token.COMMENT)
+
+	testError(t, "<<\nfoo\n\n", "1:3", "zero-length heredoc anchor", token.HEREDOC)
+	testError(t, "<<-\nfoo\n\n", "1:4", "zero-length heredoc anchor", token.HEREDOC)
+}
+
+func testError(t *testing.T, src, pos, msg string, tok token.Type) {
+	s := New([]byte(src))
+
+	errorCalled := false
+	s.Error = func(p token.Pos, m string) {
+		if !errorCalled {
+			if pos != p.String() {
+				t.Errorf("pos = %q, want %q for %q", p, pos, src)
+			}
+
+			if m != msg {
+				t.Errorf("msg = %q, want %q for %q", m, msg, src)
+			}
+			errorCalled = true
+		}
+	}
+
+	tk := s.Scan()
+	if tk.Type != tok {
+		t.Errorf("tok = %s, want %s for %q", tk, tok, src)
+	}
+	if !errorCalled {
+		t.Errorf("error handler not called for %q", src)
+	}
+	if s.ErrorCount == 0 {
+		t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
+	}
+}
+
+func testTokenList(t *testing.T, tokenList []tokenPair) {
+	// create artificial source code
+	buf := new(bytes.Buffer)
+	for _, ident := range tokenList {
+		fmt.Fprintf(buf, "%s\n", ident.text)
+	}
+
+	s := New(buf.Bytes())
+	for _, ident := range tokenList {
+		tok := s.Scan()
+		if tok.Type != ident.tok {
+			t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
+		}
+
+		if tok.Text != ident.text {
+			t.Errorf("text = %q want %q", tok.String(), ident.text)
+		}
+
+	}
+}
+
+func countNewlines(s string) int {
+	n := 0
+	for _, ch := range s {
+		if ch == '\n' {
+			n++
+		}
+	}
+	return n
+}
+
+func TestScanDigitsUnread(t *testing.T) {
+	cases := []string{
+		"M=0\"\\00",
+		"M=\"\\00",
+		"\"\\00",
+		"M=[\"\\00",
+		"U{\"\\00",
+		"\"\n{}#\n\"\\00",
+		"M=[[\"\\00",
+		"U{d=0\"\\U00",
+		"#\n\"\\x00",
+		"m=[[[\"\\00",
+	}
+
+	for _, c := range cases {
+		s := New([]byte(c))
+
+		for {
+			tok := s.Scan()
+			if tok.Type == token.EOF {
+				break
+			}
+			t.Logf("s.Scan() = %s", tok)
+		}
+	}
+}
+
+func TestScanHeredocRegexpCompile(t *testing.T) {
+	cases := []string{
+		"0\xe1\n<<ȸ\nhello\nworld\nȸ",
+	}
+
+	for _, c := range cases {
+		s := New([]byte(c))
+
+		for {
+			tok := s.Scan()
+			if tok.Type == token.EOF {
+				break
+			}
+			t.Logf("s.Scan() = %s", tok)
+		}
+	}
+}
diff --git a/hcl/strconv/quote.go b/hcl/strconv/quote.go
new file mode 100644
index 0000000..5f981ea
--- /dev/null
+++ b/hcl/strconv/quote.go
@@ -0,0 +1,241 @@
+package strconv
+
+import (
+	"errors"
+	"unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a double-quoted HCL string literal, returning
+// the string value that s quotes. Unlike strconv.Unquote in the standard
+// library, single-quoted and backquoted literals are rejected, and the
+// contents of ${...} interpolation sequences are passed through without
+// unquoting.
+func Unquote(s string) (t string, err error) {
+	n := len(s)
+	if n < 2 {
+		return "", ErrSyntax
+	}
+	quote := s[0]
+	if quote != s[n-1] {
+		return "", ErrSyntax
+	}
+	s = s[1 : n-1]
+
+	if quote != '"' {
+		return "", ErrSyntax
+	}
+	if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+		return "", ErrSyntax
+	}
+
+	// Is it trivial? Avoid allocation.
+	if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+		switch quote {
+		case '"':
+			return s, nil
+		case '\'':
+			r, size := utf8.DecodeRuneInString(s)
+			if size == len(s) && (r != utf8.RuneError || size != 1) {
+				return s, nil
+			}
+		}
+	}
+
+	var runeTmp [utf8.UTFMax]byte
+	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+	for len(s) > 0 {
+		// If we're starting a '${}' then let it through un-unquoted.
+		// Specifically: we don't unquote any characters within the `${}`
+		// section.
+		if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+			buf = append(buf, '$', '{')
+			s = s[2:]
+
+			// Continue reading until we find the closing brace, copying as-is
+			braces := 1
+			for len(s) > 0 && braces > 0 {
+				r, size := utf8.DecodeRuneInString(s)
+				if r == utf8.RuneError {
+					return "", ErrSyntax
+				}
+
+				s = s[size:]
+
+				n := utf8.EncodeRune(runeTmp[:], r)
+				buf = append(buf, runeTmp[:n]...)
+
+				switch r {
+				case '{':
+					braces++
+				case '}':
+					braces--
+				}
+			}
+			if braces != 0 {
+				return "", ErrSyntax
+			}
+			if len(s) == 0 {
+				// If there's no string left, we're done!
+				break
+			} else {
+				// If there's more left, we need to pop back up to the top of the loop
+				// in case there's another interpolation in this string.
+				continue
+			}
+		}
+
+		if s[0] == '\n' {
+			return "", ErrSyntax
+		}
+
+		c, multibyte, ss, err := unquoteChar(s, quote)
+		if err != nil {
+			return "", err
+		}
+		s = ss
+		if c < utf8.RuneSelf || !multibyte {
+			buf = append(buf, byte(c))
+		} else {
+			n := utf8.EncodeRune(runeTmp[:], c)
+			buf = append(buf, runeTmp[:n]...)
+		}
+		if quote == '\'' && len(s) != 0 {
+			// single-quoted must be single character
+			return "", ErrSyntax
+		}
+	}
+	return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] == c {
+			return true
+		}
+	}
+	return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+	c := rune(b)
+	switch {
+	case '0' <= c && c <= '9':
+		return c - '0', true
+	case 'a' <= c && c <= 'f':
+		return c - 'a' + 10, true
+	case 'A' <= c && c <= 'F':
+		return c - 'A' + 10, true
+	}
+	return
+}
+
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+	// easy cases
+	switch c := s[0]; {
+	case c == quote && (quote == '\'' || quote == '"'):
+		err = ErrSyntax
+		return
+	case c >= utf8.RuneSelf:
+		r, size := utf8.DecodeRuneInString(s)
+		return r, true, s[size:], nil
+	case c != '\\':
+		return rune(s[0]), false, s[1:], nil
+	}
+
+	// hard case: c is backslash
+	if len(s) <= 1 {
+		err = ErrSyntax
+		return
+	}
+	c := s[1]
+	s = s[2:]
+
+	switch c {
+	case 'a':
+		value = '\a'
+	case 'b':
+		value = '\b'
+	case 'f':
+		value = '\f'
+	case 'n':
+		value = '\n'
+	case 'r':
+		value = '\r'
+	case 't':
+		value = '\t'
+	case 'v':
+		value = '\v'
+	case 'x', 'u', 'U':
+		n := 0
+		switch c {
+		case 'x':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		var v rune
+		if len(s) < n {
+			err = ErrSyntax
+			return
+		}
+		for j := 0; j < n; j++ {
+			x, ok := unhex(s[j])
+			if !ok {
+				err = ErrSyntax
+				return
+			}
+			v = v<<4 | x
+		}
+		s = s[n:]
+		if c == 'x' {
+			// single-byte string, possibly not UTF-8
+			value = v
+			break
+		}
+		if v > utf8.MaxRune {
+			err = ErrSyntax
+			return
+		}
+		value = v
+		multibyte = true
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		v := rune(c) - '0'
+		if len(s) < 2 {
+			err = ErrSyntax
+			return
+		}
+		for j := 0; j < 2; j++ { // one digit already; two more
+			x := rune(s[j]) - '0'
+			if x < 0 || x > 7 {
+				err = ErrSyntax
+				return
+			}
+			v = (v << 3) | x
+		}
+		s = s[2:]
+		if v > 255 {
+			err = ErrSyntax
+			return
+		}
+		value = v
+	case '\\':
+		value = '\\'
+	case '\'', '"':
+		if c != quote {
+			err = ErrSyntax
+			return
+		}
+		value = rune(c)
+	default:
+		err = ErrSyntax
+		return
+	}
+	tail = s
+	return
+}
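Reviewer note: the key behavioral difference from the standard library's strconv.Unquote is that `${...}` interpolation sequences pass through verbatim while ordinary escapes are decoded. A minimal usage sketch against the google3 import path used elsewhere in this change (illustrative, not part of the import):

    package main

    import (
    	"fmt"

    	hclstrconv "google3/third_party/golang/hashicorp/hcl/hcl/strconv/strconv"
    )

    func main() {
    	// Ordinary escapes are decoded as in the standard library.
    	s, _ := hclstrconv.Unquote(`"a\tb"`)
    	fmt.Printf("%q\n", s) // "a\tb"

    	// Interpolation bodies are copied through untouched, inner quotes included.
    	s, _ = hclstrconv.Unquote(`"${file("foo")}"`)
    	fmt.Println(s) // ${file("foo")}
    }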
diff --git a/hcl/strconv/quote_test.go b/hcl/strconv/quote_test.go
new file mode 100644
index 0000000..65be375
--- /dev/null
+++ b/hcl/strconv/quote_test.go
@@ -0,0 +1,96 @@
+package strconv
+
+import "testing"
+
+type quoteTest struct {
+	in    string
+	out   string
+	ascii string
+}
+
+var quotetests = []quoteTest{
+	{"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`},
+	{"\\", `"\\"`, `"\\"`},
+	{"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`},
+	{"\u263a", `"☺"`, `"\u263a"`},
+	{"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`},
+	{"\x04", `"\x04"`, `"\x04"`},
+}
+
+type unQuoteTest struct {
+	in  string
+	out string
+}
+
+var unquotetests = []unQuoteTest{
+	{`""`, ""},
+	{`"a"`, "a"},
+	{`"abc"`, "abc"},
+	{`"☺"`, "☺"},
+	{`"hello world"`, "hello world"},
+	{`"\xFF"`, "\xFF"},
+	{`"\377"`, "\377"},
+	{`"\u1234"`, "\u1234"},
+	{`"\U00010111"`, "\U00010111"},
+	{`"\U0001011111"`, "\U0001011111"},
+	{`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""},
+	{`"'"`, "'"},
+	{`"${file("foo")}"`, `${file("foo")}`},
+	{`"${file("\"foo\"")}"`, `${file("\"foo\"")}`},
+	{`"echo ${var.region}${element(split(",",var.zones),0)}"`,
+		`echo ${var.region}${element(split(",",var.zones),0)}`},
+	{`"${HH\\:mm\\:ss}"`, `${HH\\:mm\\:ss}`},
+	{`"${\n}"`, `${\n}`},
+}
+
+var misquoted = []string{
+	``,
+	`"`,
+	`"a`,
+	`"'`,
+	`b"`,
+	`"\"`,
+	`"\9"`,
+	`"\19"`,
+	`"\129"`,
+	`'\'`,
+	`'\9'`,
+	`'\19'`,
+	`'\129'`,
+	`'ab'`,
+	`"\x1!"`,
+	`"\U12345678"`,
+	`"\z"`,
+	"`",
+	"`xxx",
+	"`\"",
+	`"\'"`,
+	`'\"'`,
+	"\"\n\"",
+	"\"\\n\n\"",
+	"'\n'",
+	`"${"`,
+	`"${foo{}"`,
+	"\"${foo}\n\"",
+}
+
+func TestUnquote(t *testing.T) {
+	for _, tt := range unquotetests {
+		if out, err := Unquote(tt.in); err != nil || out != tt.out {
+			t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out)
+		}
+	}
+
+	// run the quote tests too, backward
+	for _, tt := range quotetests {
+		if in, err := Unquote(tt.out); in != tt.in {
+			t.Errorf("Unquote(%#q) = %q, %v, want %q, nil", tt.out, in, err, tt.in)
+		}
+	}
+
+	for _, s := range misquoted {
+		if out, err := Unquote(s); out != "" || err != ErrSyntax {
+			t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax)
+		}
+	}
+}
diff --git a/hcl/test-fixtures/array_comment.hcl b/hcl/test-fixtures/array_comment.hcl
new file mode 100644
index 0000000..78c2675
--- /dev/null
+++ b/hcl/test-fixtures/array_comment.hcl
@@ -0,0 +1,4 @@
+foo = [
+    "1",
+    "2", # comment
+]
diff --git a/hcl/test-fixtures/assign_colon.hcl b/hcl/test-fixtures/assign_colon.hcl
new file mode 100644
index 0000000..eb5a99a
--- /dev/null
+++ b/hcl/test-fixtures/assign_colon.hcl
@@ -0,0 +1,6 @@
+resource = [{
+	"foo": {
+		"bar": {},
+		"baz": [1, 2, "foo"],
+	}
+}]
diff --git a/hcl/test-fixtures/comment.hcl b/hcl/test-fixtures/comment.hcl
new file mode 100644
index 0000000..1ff7f29
--- /dev/null
+++ b/hcl/test-fixtures/comment.hcl
@@ -0,0 +1,15 @@
+// Foo
+
+/* Bar */
+
+/*
+/*
+Baz
+*/
+
+# Another
+
+# Multiple
+# Lines
+
+foo = "bar"
diff --git a/hcl/test-fixtures/comment_single.hcl b/hcl/test-fixtures/comment_single.hcl
new file mode 100644
index 0000000..fec5601
--- /dev/null
+++ b/hcl/test-fixtures/comment_single.hcl
@@ -0,0 +1 @@
+# Hello
diff --git a/hcl/test-fixtures/complex.hcl b/hcl/test-fixtures/complex.hcl
new file mode 100644
index 0000000..cccb5b0
--- /dev/null
+++ b/hcl/test-fixtures/complex.hcl
@@ -0,0 +1,42 @@
+// This comes from Terraform, as a test
+variable "foo" {
+    default = "bar"
+    description = "bar"
+}
+
+provider "aws" {
+  access_key = "foo"
+  secret_key = "bar"
+}
+
+provider "do" {
+  api_key = "${var.foo}"
+}
+
+resource "aws_security_group" "firewall" {
+    count = 5
+}
+
+resource aws_instance "web" {
+    ami = "${var.foo}"
+    security_groups = [
+        "foo",
+        "${aws_security_group.firewall.foo}"
+    ]
+
+    network_interface {
+        device_index = 0
+        description = "Main network interface"
+    }
+}
+
+resource "aws_instance" "db" {
+    security_groups = "${aws_security_group.firewall.*.id}"
+    VPC = "foo"
+
+    depends_on = ["aws_instance.web"]
+}
+
+output "web_ip" {
+    value = "${aws_instance.web.private_ip}"
+}
diff --git a/hcl/test-fixtures/complex_key.hcl b/hcl/test-fixtures/complex_key.hcl
new file mode 100644
index 0000000..0007aaf
--- /dev/null
+++ b/hcl/test-fixtures/complex_key.hcl
@@ -0,0 +1 @@
+foo.bar = "baz"
diff --git a/hcl/test-fixtures/empty.hcl b/hcl/test-fixtures/empty.hcl
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/hcl/test-fixtures/empty.hcl
diff --git a/hcl/test-fixtures/list.hcl b/hcl/test-fixtures/list.hcl
new file mode 100644
index 0000000..059d4ce
--- /dev/null
+++ b/hcl/test-fixtures/list.hcl
@@ -0,0 +1 @@
+foo = [1, 2, "foo"]

diff --git a/hcl/test-fixtures/list_comma.hcl b/hcl/test-fixtures/list_comma.hcl
new file mode 100644
index 0000000..50f4218
--- /dev/null
+++ b/hcl/test-fixtures/list_comma.hcl
@@ -0,0 +1 @@
+foo = [1, 2, "foo",]
diff --git a/hcl/test-fixtures/multiple.hcl b/hcl/test-fixtures/multiple.hcl
new file mode 100644
index 0000000..029c54b
--- /dev/null
+++ b/hcl/test-fixtures/multiple.hcl
@@ -0,0 +1,2 @@
+foo = "bar"

+key = 7

diff --git a/hcl/test-fixtures/old.hcl b/hcl/test-fixtures/old.hcl
new file mode 100644
index 0000000..e9f77ca
--- /dev/null
+++ b/hcl/test-fixtures/old.hcl
@@ -0,0 +1,3 @@
+default = {
+    "eu-west-1": "ami-b1cf19c6",
+}
diff --git a/hcl/test-fixtures/structure.hcl b/hcl/test-fixtures/structure.hcl
new file mode 100644
index 0000000..92592fb
--- /dev/null
+++ b/hcl/test-fixtures/structure.hcl
@@ -0,0 +1,5 @@
+// This is a test structure for the lexer
+foo bar "baz" {
+	key = 7
+	foo = "bar"
+}
diff --git a/hcl/test-fixtures/structure_basic.hcl b/hcl/test-fixtures/structure_basic.hcl
new file mode 100644
index 0000000..7229a1f
--- /dev/null
+++ b/hcl/test-fixtures/structure_basic.hcl
@@ -0,0 +1,5 @@
+foo {
+	value = 7
+	"value" = 8
+	"complex::value" = 9
+}
diff --git a/hcl/test-fixtures/structure_empty.hcl b/hcl/test-fixtures/structure_empty.hcl
new file mode 100644
index 0000000..4d156dd
--- /dev/null
+++ b/hcl/test-fixtures/structure_empty.hcl
@@ -0,0 +1 @@
+resource "foo" "bar" {}

diff --git a/hcl/test-fixtures/types.hcl b/hcl/test-fixtures/types.hcl
new file mode 100644
index 0000000..cf2747e
--- /dev/null
+++ b/hcl/test-fixtures/types.hcl
@@ -0,0 +1,7 @@
+foo = "bar"

+bar = 7

+baz = [1,2,3]

+foo = -12

+bar = 3.14159

+foo = true

+bar = false

diff --git a/hcl/token/position.go b/hcl/token/position.go
new file mode 100644
index 0000000..59c1bb7
--- /dev/null
+++ b/hcl/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Pos is valid if the line number is > 0.
+type Pos struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	line:column         valid position without file name
+//	file                invalid position with file name
+//	-                   invalid position without file name
+func (p Pos) String() string {
+	s := p.Filename
+	if p.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+	}
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+	return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+	return u.Offset < p.Offset || u.Line < p.Line
+}
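For reviewers, the String forms in practice (a quick sketch with illustrative values, not part of the import):

    p := token.Pos{Filename: "main.hcl", Offset: 12, Line: 2, Column: 3}
    fmt.Println(p) // main.hcl:2:3

    p.Filename = ""
    fmt.Println(p) // 2:3

    fmt.Println(token.Pos{}) // - (invalid position, no file name)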
diff --git a/hcl/token/token.go b/hcl/token/token.go
new file mode 100644
index 0000000..f8e48ab
--- /dev/null
+++ b/hcl/token/token.go
@@ -0,0 +1,219 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	hclstrconv "google3/third_party/golang/hashicorp/hcl/hcl/strconv/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+	Type Type
+	Pos  Pos
+	Text string
+	JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+	// Special tokens
+	ILLEGAL Type = iota
+	EOF
+	COMMENT
+
+	identifier_beg
+	IDENT // literals
+	literal_beg
+	NUMBER  // 12345
+	FLOAT   // 123.45
+	BOOL    // true,false
+	STRING  // "abc"
+	HEREDOC // <<FOO\nbar\nFOO
+	literal_end
+	identifier_end
+
+	operator_beg
+	LBRACK // [
+	LBRACE // {
+	COMMA  // ,
+	PERIOD // .
+
+	RBRACK // ]
+	RBRACE // }
+
+	ASSIGN // =
+	ADD    // +
+	SUB    // -
+	operator_end
+)
+
+var tokens = [...]string{
+	ILLEGAL: "ILLEGAL",
+
+	EOF:     "EOF",
+	COMMENT: "COMMENT",
+
+	IDENT:  "IDENT",
+	NUMBER: "NUMBER",
+	FLOAT:  "FLOAT",
+	BOOL:   "BOOL",
+	STRING: "STRING",
+
+	LBRACK:  "LBRACK",
+	LBRACE:  "LBRACE",
+	COMMA:   "COMMA",
+	PERIOD:  "PERIOD",
+	HEREDOC: "HEREDOC",
+
+	RBRACK: "RBRACK",
+	RBRACE: "RBRACE",
+
+	ASSIGN: "ASSIGN",
+	ADD:    "ADD",
+	SUB:    "SUB",
+}
+
+// String returns the string corresponding to the token type t.
+func (t Type) String() string {
+	s := ""
+	if 0 <= t && t < Type(len(tokens)) {
+		s = tokens[t]
+	}
+	if s == "" {
+		s = "token(" + strconv.Itoa(int(t)) + ")"
+	}
+	return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a human-readable string with the token's position,
+// type, and literal text.
+func (t Token) String() string {
+	return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// Value returns the properly typed value for this token. The type of
+// the returned interface{} is guaranteed based on the Type field.
+//
+// This can only be called for literal types. If it is called for any other
+// type, this will panic.
+func (t Token) Value() interface{} {
+	switch t.Type {
+	case BOOL:
+		if t.Text == "true" {
+			return true
+		} else if t.Text == "false" {
+			return false
+		}
+
+		panic("unknown bool value: " + t.Text)
+	case FLOAT:
+		v, err := strconv.ParseFloat(t.Text, 64)
+		if err != nil {
+			panic(err)
+		}
+
+		return float64(v)
+	case NUMBER:
+		v, err := strconv.ParseInt(t.Text, 0, 64)
+		if err != nil {
+			panic(err)
+		}
+
+		return int64(v)
+	case IDENT:
+		return t.Text
+	case HEREDOC:
+		return unindentHeredoc(t.Text)
+	case STRING:
+		// Determine the Unquote method to use. If it came from JSON,
+		// then we need to use the built-in unquote since we have to
+		// escape interpolations there.
+		f := hclstrconv.Unquote
+		if t.JSON {
+			f = strconv.Unquote
+		}
+
+		// This case occurs if json null is used
+		if t.Text == "" {
+			return ""
+		}
+
+		v, err := f(t.Text)
+		if err != nil {
+			panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
+		}
+
+		return v
+	default:
+		panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
+	}
+}
+
+// unindentHeredoc returns the string content of a HEREDOC. If the heredoc
+// starts with <<-, the hanging indent is removed, provided the terminating
+// line is at least as indented as the least indented content line.
+func unindentHeredoc(heredoc string) string {
+	// We need to find the end of the marker
+	idx := strings.IndexByte(heredoc, '\n')
+	if idx == -1 {
+		panic("heredoc doesn't contain newline")
+	}
+
+	unindent := heredoc[2] == '-'
+
+	// We can optimize if the heredoc isn't marked for indentation
+	if !unindent {
+		return string(heredoc[idx+1 : len(heredoc)-idx+1])
+	}
+
+	// We need to unindent each line based on the indentation level of the marker
+	lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
+	whitespacePrefix := lines[len(lines)-1]
+
+	isIndented := true
+	for _, v := range lines {
+		if strings.HasPrefix(v, whitespacePrefix) {
+			continue
+		}
+
+		isIndented = false
+		break
+	}
+
+	// If all lines are not at least as indented as the terminating mark, return the
+	// heredoc as is, but trim the leading space from the marker on the final line.
+	if !isIndented {
+		return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
+	}
+
+	unindentedLines := make([]string, len(lines))
+	for k, v := range lines {
+		if k == len(lines)-1 {
+			unindentedLines[k] = ""
+			break
+		}
+
+		unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
+	}
+
+	return strings.Join(unindentedLines, "\n")
+}
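Value's contract is per-Type, so callers usually gate on the Is* predicates first. A small sketch (illustrative values, not part of the import):

    tok := token.Token{Type: token.NUMBER, Text: "42"}
    if tok.Type.IsLiteral() {
    	n := tok.Value().(int64) // NUMBER is documented to yield int64
    	fmt.Println(n + 1)       // 43
    }

    str := token.Token{Type: token.STRING, Text: `"${var.foo}"`}
    fmt.Println(str.Value()) // ${var.foo} -- interpolation kept verbatim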
diff --git a/hcl/token/token_test.go b/hcl/token/token_test.go
new file mode 100644
index 0000000..e4b4af2
--- /dev/null
+++ b/hcl/token/token_test.go
@@ -0,0 +1,69 @@
+package token
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestTypeString(t *testing.T) {
+	var tokens = []struct {
+		tt  Type
+		str string
+	}{
+		{ILLEGAL, "ILLEGAL"},
+		{EOF, "EOF"},
+		{COMMENT, "COMMENT"},
+		{IDENT, "IDENT"},
+		{NUMBER, "NUMBER"},
+		{FLOAT, "FLOAT"},
+		{BOOL, "BOOL"},
+		{STRING, "STRING"},
+		{HEREDOC, "HEREDOC"},
+		{LBRACK, "LBRACK"},
+		{LBRACE, "LBRACE"},
+		{COMMA, "COMMA"},
+		{PERIOD, "PERIOD"},
+		{RBRACK, "RBRACK"},
+		{RBRACE, "RBRACE"},
+		{ASSIGN, "ASSIGN"},
+		{ADD, "ADD"},
+		{SUB, "SUB"},
+	}
+
+	for _, token := range tokens {
+		if token.tt.String() != token.str {
+			t.Errorf("want: %q got:%q\n", token.str, token.tt)
+		}
+	}
+
+}
+
+func TestTokenValue(t *testing.T) {
+	var tokens = []struct {
+		tt Token
+		v  interface{}
+	}{
+		{Token{Type: BOOL, Text: `true`}, true},
+		{Token{Type: BOOL, Text: `false`}, false},
+		{Token{Type: FLOAT, Text: `3.14`}, float64(3.14)},
+		{Token{Type: NUMBER, Text: `42`}, int64(42)},
+		{Token{Type: IDENT, Text: `foo`}, "foo"},
+		{Token{Type: STRING, Text: `"foo"`}, "foo"},
+		{Token{Type: STRING, Text: `"foo\nbar"`}, "foo\nbar"},
+		{Token{Type: STRING, Text: `"${file("foo")}"`}, `${file("foo")}`},
+		{
+			Token{
+				Type: STRING,
+				Text: `"${replace("foo", ".", "\\.")}"`,
+			},
+			`${replace("foo", ".", "\\.")}`},
+		{Token{Type: HEREDOC, Text: "<<EOF\nfoo\nbar\nEOF"}, "foo\nbar"},
+	}
+
+	for _, token := range tokens {
+		if val := token.tt.Value(); !reflect.DeepEqual(val, token.v) {
+			t.Errorf("want: %v got:%v\n", token.v, val)
+		}
+	}
+
+}
diff --git a/hcl_test.go b/hcl_test.go
new file mode 100644
index 0000000..31dff7c
--- /dev/null
+++ b/hcl_test.go
@@ -0,0 +1,19 @@
+package hcl
+
+import (
+	"io/ioutil"
+	"path/filepath"
+	"testing"
+)
+
+// This is the directory where our test fixtures are.
+const fixtureDir = "./test-fixtures"
+
+func testReadFile(t *testing.T, n string) string {
+	d, err := ioutil.ReadFile(filepath.Join(fixtureDir, n))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	return string(d)
+}
diff --git a/json/parser/flatten.go b/json/parser/flatten.go
new file mode 100644
index 0000000..0d8e95d
--- /dev/null
+++ b/json/parser/flatten.go
@@ -0,0 +1,117 @@
+package parser
+
+import "google3/third_party/golang/hashicorp/hcl/hcl/ast/ast"
+
+// flattenObjects takes an AST node, walks it, and flattens
+func flattenObjects(node ast.Node) {
+	ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
+		// We only care about lists, because this is what we modify
+		list, ok := n.(*ast.ObjectList)
+		if !ok {
+			return n, true
+		}
+
+		// Rebuild the item list
+		items := make([]*ast.ObjectItem, 0, len(list.Items))
+		frontier := make([]*ast.ObjectItem, len(list.Items))
+		copy(frontier, list.Items)
+		for len(frontier) > 0 {
+			// Pop the current item
+			n := len(frontier)
+			item := frontier[n-1]
+			frontier = frontier[:n-1]
+
+			switch v := item.Val.(type) {
+			case *ast.ObjectType:
+				items, frontier = flattenObjectType(v, item, items, frontier)
+			case *ast.ListType:
+				items, frontier = flattenListType(v, item, items, frontier)
+			default:
+				items = append(items, item)
+			}
+		}
+
+		// Reverse the list since the frontier model runs things backwards
+		for i := len(items)/2 - 1; i >= 0; i-- {
+			opp := len(items) - 1 - i
+			items[i], items[opp] = items[opp], items[i]
+		}
+
+		// Done! Set the original items
+		list.Items = items
+		return n, true
+	})
+}
+
+func flattenListType(
+	ot *ast.ListType,
+	item *ast.ObjectItem,
+	items []*ast.ObjectItem,
+	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+	// If the list is empty, keep the original list
+	if len(ot.List) == 0 {
+		items = append(items, item)
+		return items, frontier
+	}
+
+	// All the elements of this list must also be objects!
+	for _, subitem := range ot.List {
+		if _, ok := subitem.(*ast.ObjectType); !ok {
+			items = append(items, item)
+			return items, frontier
+		}
+	}
+
+	// Great! We have a match; go through all the items and flatten.
+	for _, elem := range ot.List {
+		// Add it to the frontier so that we can recurse
+		frontier = append(frontier, &ast.ObjectItem{
+			Keys:        item.Keys,
+			Assign:      item.Assign,
+			Val:         elem,
+			LeadComment: item.LeadComment,
+			LineComment: item.LineComment,
+		})
+	}
+
+	return items, frontier
+}
+
+func flattenObjectType(
+	ot *ast.ObjectType,
+	item *ast.ObjectItem,
+	items []*ast.ObjectItem,
+	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+	// If the list has no items we do not have to flatten anything
+	if ot.List.Items == nil {
+		items = append(items, item)
+		return items, frontier
+	}
+
+	// All the elements of this object must also be objects!
+	for _, subitem := range ot.List.Items {
+		if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+			items = append(items, item)
+			return items, frontier
+		}
+	}
+
+	// Great! We have a match; go through all the items and flatten.
+	for _, subitem := range ot.List.Items {
+		// Build the combined key from the outer and inner keys
+		keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+		copy(keys, item.Keys)
+		copy(keys[len(item.Keys):], subitem.Keys)
+
+		// Add it to the frontier so that we can recurse
+		frontier = append(frontier, &ast.ObjectItem{
+			Keys:        keys,
+			Assign:      item.Assign,
+			Val:         subitem.Val,
+			LeadComment: item.LeadComment,
+			LineComment: item.LineComment,
+		})
+	}
+
+	return items, frontier
+}
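The net effect of flattening is to turn JSON's nested arrays-of-objects into the repeated-block shape native HCL produces, so downstream decoding can treat both inputs alike. A sketch using the parser defined below (illustrative only):

    f, err := parser.Parse([]byte(`{"service": [{"name": "a"}, {"name": "b"}]}`))
    if err != nil {
    	log.Fatal(err)
    }
    list := f.Node.(*ast.ObjectList)
    // After flattening there are two "service" items, each an
    // *ast.ObjectType, mirroring HCL's repeated-block form.
    fmt.Println(len(list.Items)) // 2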
diff --git a/json/parser/google_init_test.go b/json/parser/google_init_test.go
new file mode 100644
index 0000000..de76519
--- /dev/null
+++ b/json/parser/google_init_test.go
@@ -0,0 +1,16 @@
+// This file contains google3 specific code to make tests work with blaze.
+
+package parser
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+func init() {
+	dir := filepath.Join(os.Getenv("TEST_SRCDIR"), "google3/third_party/golang/hashicorp/hcl/json/parser")
+	if err := os.Chdir(dir); err != nil {
+		panic(fmt.Sprintf("os.Chdir(%q): %v", dir, err))
+	}
+}
diff --git a/json/parser/parser.go b/json/parser/parser.go
new file mode 100644
index 0000000..379ccc6
--- /dev/null
+++ b/json/parser/parser.go
@@ -0,0 +1,313 @@
+package parser
+
+import (
+	"errors"
+	"fmt"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/ast/ast"
+	hcltoken "google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+	"google3/third_party/golang/hashicorp/hcl/json/scanner/scanner"
+	"google3/third_party/golang/hashicorp/hcl/json/token/token"
+)
+
+type Parser struct {
+	sc *scanner.Scanner
+
+	// Last read token
+	tok       token.Token
+	commaPrev token.Token
+
+	enableTrace bool
+	indent      int
+	n           int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+	return &Parser{
+		sc: scanner.New(src),
+	}
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+	p := newParser(src)
+	return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+	f := &ast.File{}
+	var err, scerr error
+	p.sc.Error = func(pos token.Pos, msg string) {
+		scerr = fmt.Errorf("%s: %s", pos, msg)
+	}
+
+	// The root must be an object in JSON
+	object, err := p.object()
+	if scerr != nil {
+		return nil, scerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// We make our final node an object list so it is more HCL compatible
+	f.Node = object.List
+
+	// Flatten it, which finds patterns and turns them into more HCL-like
+	// AST trees.
+	flattenObjects(f.Node)
+
+	return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+	defer un(trace(p, "ParseObjectList"))
+	node := &ast.ObjectList{}
+
+	for {
+		n, err := p.objectItem()
+		if err == errEofToken {
+			break // we are finished
+		}
+
+		// we don't return a nil node, because the caller might want to use
+		// the already collected items.
+		if err != nil {
+			return node, err
+		}
+
+		node.Add(n)
+
+		// Check for a followup comma. If it isn't a comma, then we're done
+		if tok := p.scan(); tok.Type != token.COMMA {
+			break
+		}
+	}
+
+	return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+	defer un(trace(p, "ParseObjectItem"))
+
+	keys, err := p.objectKey()
+	if err != nil {
+		return nil, err
+	}
+
+	o := &ast.ObjectItem{
+		Keys: keys,
+	}
+
+	switch p.tok.Type {
+	case token.COLON:
+		pos := p.tok.Pos
+		o.Assign = hcltoken.Pos{
+			Filename: pos.Filename,
+			Offset:   pos.Offset,
+			Line:     pos.Line,
+			Column:   pos.Column,
+		}
+
+		o.Val, err = p.objectValue()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+	keyCount := 0
+	keys := make([]*ast.ObjectKey, 0)
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.EOF:
+			return nil, errEofToken
+		case token.STRING:
+			keyCount++
+			keys = append(keys, &ast.ObjectKey{
+				Token: p.tok.HCLToken(),
+			})
+		case token.COLON:
+			// If we have a zero keycount it means that we never got
+			// an object key, i.e. `{ :`. This is a syntax error.
+			if keyCount == 0 {
+				return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+			}
+
+			// Done
+			return keys, nil
+		case token.ILLEGAL:
+			return nil, errors.New("illegal")
+		default:
+			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+		}
+	}
+}
+
+// objectValue parses any type of value, such as a number, bool, string,
+// object, or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+	defer un(trace(p, "ParseObjectValue"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+		return p.literalType()
+	case token.LBRACE:
+		return p.objectType()
+	case token.LBRACK:
+		return p.listType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the root value, which in JSON must be an object.
+func (p *Parser) object() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseType"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.LBRACE:
+		return p.objectType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseObjectType"))
+
+	// we assume that the currently scanned token is a LBRACE
+	o := &ast.ObjectType{}
+
+	l, err := p.objectList()
+
+	// If we hit an RBRACE, we are good to go (we parsed all items); if it's
+	// not an RBRACE, it's a syntax error, so return it.
+	if err != nil && p.tok.Type != token.RBRACE {
+		return nil, err
+	}
+
+	o.List = l
+	return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+	defer un(trace(p, "ParseListType"))
+
+	// we assume that the currently scanned token is a LBRACK
+	l := &ast.ListType{}
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.NUMBER, token.FLOAT, token.STRING:
+			node, err := p.literalType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.COMMA:
+			continue
+		case token.LBRACE:
+			node, err := p.objectType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.BOOL:
+			// TODO(arslan) should we support? not supported by HCL yet
+		case token.LBRACK:
+			// TODO(arslan) should we support nested lists? Even though it's
+			// written in README of HCL, it's not a part of the grammar
+			// (not defined in parse.y)
+		case token.RBRACK:
+			// finished
+			return l, nil
+		default:
+			return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+		}
+
+	}
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+	defer un(trace(p, "ParseLiteral"))
+
+	return &ast.LiteralType{
+		Token: p.tok.HCLToken(),
+	}, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+	// If we have a token on the buffer, then return it.
+	if p.n != 0 {
+		p.n = 0
+		return p.tok
+	}
+
+	p.tok = p.sc.Scan()
+	return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+	p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+	p.indent--
+	p.printTrace(")")
+}
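End to end, the package mirrors the HCL-native parser's entry point. A minimal sketch (assuming the google3 import paths shown above; not part of the import):

    src := []byte(`{"foo": "bar", "baz": [1, 2]}`)
    f, err := parser.Parse(src)
    if err != nil {
    	log.Fatalf("parser.Parse: %v", err)
    }
    for _, item := range f.Node.(*ast.ObjectList).Items {
    	fmt.Println(item.Keys[0].Token.Text) // "foo", then "baz" (quotes retained)
    }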
diff --git a/json/parser/parser_test.go b/json/parser/parser_test.go
new file mode 100644
index 0000000..f838a6c
--- /dev/null
+++ b/json/parser/parser_test.go
@@ -0,0 +1,384 @@
+package parser
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"testing"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/ast/ast"
+	"google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+)
+
+func TestType(t *testing.T) {
+	var literals = []struct {
+		typ token.Type
+		src string
+	}{
+		{token.STRING, `"foo": "bar"`},
+		{token.NUMBER, `"foo": 123`},
+		{token.FLOAT, `"foo": 123.12`},
+		{token.FLOAT, `"foo": -123.12`},
+		{token.BOOL, `"foo": true`},
+		{token.STRING, `"foo": null`},
+	}
+
+	for _, l := range literals {
+		t.Logf("Testing: %s", l.src)
+
+		p := newParser([]byte(l.src))
+		item, err := p.objectItem()
+		if err != nil {
+			t.Error(err)
+		}
+
+		lit, ok := item.Val.(*ast.LiteralType)
+		if !ok {
+			t.Errorf("node should be of type LiteralType, got: %T", item.Val)
+		}
+
+		if lit.Token.Type != l.typ {
+			t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type)
+		}
+	}
+}
+
+func TestListType(t *testing.T) {
+	var literals = []struct {
+		src    string
+		tokens []token.Type
+	}{
+		{
+			`"foo": ["123", 123]`,
+			[]token.Type{token.STRING, token.NUMBER},
+		},
+		{
+			`"foo": [123, "123",]`,
+			[]token.Type{token.NUMBER, token.STRING},
+		},
+		{
+			`"foo": []`,
+			[]token.Type{},
+		},
+		{
+			`"foo": ["123", 123]`,
+			[]token.Type{token.STRING, token.NUMBER},
+		},
+		{
+			`"foo": ["123", {}]`,
+			[]token.Type{token.STRING, token.LBRACE},
+		},
+	}
+
+	for _, l := range literals {
+		t.Logf("Testing: %s", l.src)
+
+		p := newParser([]byte(l.src))
+		item, err := p.objectItem()
+		if err != nil {
+			t.Error(err)
+		}
+
+		list, ok := item.Val.(*ast.ListType)
+		if !ok {
+			t.Errorf("node should be of type LiteralType, got: %T", item.Val)
+		}
+
+		tokens := []token.Type{}
+		for _, li := range list.List {
+			switch v := li.(type) {
+			case *ast.LiteralType:
+				tokens = append(tokens, v.Token.Type)
+			case *ast.ObjectType:
+				tokens = append(tokens, token.LBRACE)
+			}
+		}
+
+		equals(t, l.tokens, tokens)
+	}
+}
+
+func TestObjectType(t *testing.T) {
+	var literals = []struct {
+		src      string
+		nodeType []ast.Node
+		itemLen  int
+	}{
+		{
+			`"foo": {}`,
+			nil,
+			0,
+		},
+		{
+			`"foo": {
+				"bar": "fatih"
+			 }`,
+			[]ast.Node{&ast.LiteralType{}},
+			1,
+		},
+		{
+			`"foo": {
+				"bar": "fatih",
+				"baz": ["arslan"]
+			 }`,
+			[]ast.Node{
+				&ast.LiteralType{},
+				&ast.ListType{},
+			},
+			2,
+		},
+		{
+			`"foo": {
+				"bar": {}
+			 }`,
+			[]ast.Node{
+				&ast.ObjectType{},
+			},
+			1,
+		},
+		{
+			`"foo": {
+				"bar": {},
+				"foo": true
+			 }`,
+			[]ast.Node{
+				&ast.ObjectType{},
+				&ast.LiteralType{},
+			},
+			2,
+		},
+	}
+
+	for _, l := range literals {
+		t.Logf("Testing:\n%s\n", l.src)
+
+		p := newParser([]byte(l.src))
+		// p.enableTrace = true
+		item, err := p.objectItem()
+		if err != nil {
+			t.Error(err)
+		}
+
+		// we know that the ObjectKey name is foo for all cases, what matters
+		// is the object
+		obj, ok := item.Val.(*ast.ObjectType)
+		if !ok {
+			t.Errorf("node should be of type LiteralType, got: %T", item.Val)
+		}
+
+		// check if the total length of items are correct
+		equals(t, l.itemLen, len(obj.List.Items))
+
+		// check if the types are correct
+		for i, item := range obj.List.Items {
+			equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
+		}
+	}
+}
+
+func TestFlattenObjects(t *testing.T) {
+	var literals = []struct {
+		src      string
+		nodeType []ast.Node
+		itemLen  int
+	}{
+		{
+			`{
+					"foo": [
+						{
+							"foo": "svh",
+							"bar": "fatih"
+						}
+					]
+				}`,
+			[]ast.Node{
+				&ast.ObjectType{},
+				&ast.LiteralType{},
+				&ast.LiteralType{},
+			},
+			3,
+		},
+		{
+			`{
+					"variable": {
+						"foo": {}
+					}
+				}`,
+			[]ast.Node{
+				&ast.ObjectType{},
+			},
+			1,
+		},
+		{
+			`{
+				"empty": []
+			}`,
+			[]ast.Node{
+				&ast.ListType{},
+			},
+			1,
+		},
+		{
+			`{
+				"basic": [1, 2, 3]
+			}`,
+			[]ast.Node{
+				&ast.ListType{},
+			},
+			1,
+		},
+	}
+
+	for _, l := range literals {
+		t.Logf("Testing:\n%s\n", l.src)
+
+		f, err := Parse([]byte(l.src))
+		if err != nil {
+			t.Error(err)
+		}
+
+		// the first object is always an ObjectList so just assert that one
+		// so we can use it as such
+		obj, ok := f.Node.(*ast.ObjectList)
+		if !ok {
+			t.Errorf("node should be *ast.ObjectList, got: %T", f.Node)
+		}
+
+		// check if the types are correct
+		var i int
+		for _, item := range obj.Items {
+			equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
+			i++
+
+			if obj, ok := item.Val.(*ast.ObjectType); ok {
+				for _, item := range obj.List.Items {
+					equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
+					i++
+				}
+			}
+		}
+
+		// check if the number of items is correct
+		equals(t, l.itemLen, i)
+
+	}
+}
+
+func TestObjectKey(t *testing.T) {
+	keys := []struct {
+		exp []token.Type
+		src string
+	}{
+		{[]token.Type{token.STRING}, `"foo": {}`},
+	}
+
+	for _, k := range keys {
+		p := newParser([]byte(k.src))
+		keys, err := p.objectKey()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		tokens := []token.Type{}
+		for _, o := range keys {
+			tokens = append(tokens, o.Token.Type)
+		}
+
+		equals(t, k.exp, tokens)
+	}
+
+	errKeys := []struct {
+		src string
+	}{
+		{`foo 12 {}`},
+		{`foo bar = {}`},
+		{`foo []`},
+		{`12 {}`},
+	}
+
+	for _, k := range errKeys {
+		p := newParser([]byte(k.src))
+		_, err := p.objectKey()
+		if err == nil {
+			t.Errorf("case '%s' should give an error", k.src)
+		}
+	}
+}
+
+// Official HCL tests
+func TestParse(t *testing.T) {
+	cases := []struct {
+		Name string
+		Err  bool
+	}{
+		{
+			"array.json",
+			false,
+		},
+		{
+			"basic.json",
+			false,
+		},
+		{
+			"object.json",
+			false,
+		},
+		{
+			"types.json",
+			false,
+		},
+		{
+			"bad_input_128.json",
+			true,
+		},
+		{
+			"bad_input_tf_8110.json",
+			true,
+		},
+		{
+			"good_input_tf_8110.json",
+			false,
+		},
+	}
+
+	const fixtureDir = "./test-fixtures"
+
+	for _, tc := range cases {
+		d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		_, err = Parse(d)
+		if (err != nil) != tc.Err {
+			t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
+		}
+	}
+}
+
+func TestParse_inline(t *testing.T) {
+	cases := []struct {
+		Value string
+		Err   bool
+	}{
+		{"{:{", true},
+	}
+
+	for _, tc := range cases {
+		_, err := Parse([]byte(tc.Value))
+		if (err != nil) != tc.Err {
+			t.Fatalf("Input: %q\n\nError: %s", tc.Value, err)
+		}
+	}
+}
+
+// equals fails the test if exp is not equal to act.
+func equals(tb testing.TB, exp, act interface{}) {
+	if !reflect.DeepEqual(exp, act) {
+		_, file, line, _ := runtime.Caller(1)
+		fmt.Printf("\033[31m%s:%d:\n\n\texp: %s\n\n\tgot: %s\033[39m\n\n", filepath.Base(file), line, exp, act)
+		tb.FailNow()
+	}
+}
diff --git a/json/parser/test-fixtures/array.json b/json/parser/test-fixtures/array.json
new file mode 100644
index 0000000..e320f17
--- /dev/null
+++ b/json/parser/test-fixtures/array.json
@@ -0,0 +1,4 @@
+{
+	"foo": [1, 2, "bar"],
+	"bar": "baz"
+}
diff --git a/json/parser/test-fixtures/bad_input_128.json b/json/parser/test-fixtures/bad_input_128.json
new file mode 100644
index 0000000..b5f850c
--- /dev/null
+++ b/json/parser/test-fixtures/bad_input_128.json
@@ -0,0 +1 @@
+{:{
diff --git a/json/parser/test-fixtures/bad_input_tf_8110.json b/json/parser/test-fixtures/bad_input_tf_8110.json
new file mode 100644
index 0000000..a043858
--- /dev/null
+++ b/json/parser/test-fixtures/bad_input_tf_8110.json
@@ -0,0 +1,7 @@
+{
+  "variable": {
+    "poc": {
+      "default": "${replace("europe-west", "-", " ")}"
+    }
+  }
+}
diff --git a/json/parser/test-fixtures/basic.json b/json/parser/test-fixtures/basic.json
new file mode 100644
index 0000000..b54bde9
--- /dev/null
+++ b/json/parser/test-fixtures/basic.json
@@ -0,0 +1,3 @@
+{
+	"foo": "bar"
+}
diff --git a/json/parser/test-fixtures/good_input_tf_8110.json b/json/parser/test-fixtures/good_input_tf_8110.json
new file mode 100644
index 0000000..f21aa09
--- /dev/null
+++ b/json/parser/test-fixtures/good_input_tf_8110.json
@@ -0,0 +1,7 @@
+{
+  "variable": {
+    "poc": {
+      "default": "${replace(\"europe-west\", \"-\", \" \")}"
+    }
+  }
+}
diff --git a/json/parser/test-fixtures/object.json b/json/parser/test-fixtures/object.json
new file mode 100644
index 0000000..72168a3
--- /dev/null
+++ b/json/parser/test-fixtures/object.json
@@ -0,0 +1,5 @@
+{
+	"foo": {
+		"bar": [1,2]
+	}
+}
diff --git a/json/parser/test-fixtures/types.json b/json/parser/test-fixtures/types.json
new file mode 100644
index 0000000..9a142a6
--- /dev/null
+++ b/json/parser/test-fixtures/types.json
@@ -0,0 +1,10 @@
+{
+	"foo": "bar",
+	"bar": 7,
+	"baz": [1,2,3],
+	"foo": -12,
+	"bar": 3.14159,
+    "foo": true,
+    "bar": false,
+    "foo": null
+}
diff --git a/json/scanner/scanner.go b/json/scanner/scanner.go
new file mode 100644
index 0000000..0f9f475
--- /dev/null
+++ b/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"unicode"
+	"unicode/utf8"
+
+	"google3/third_party/golang/hashicorp/hcl/json/token/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+	buf *bytes.Buffer // Source buffer for advancing and scanning
+	src []byte        // Source buffer for immutable access
+
+	// Source Position
+	srcPos  token.Pos // current position
+	prevPos token.Pos // previous position, used for peek() method
+
+	lastCharLen int // length of last character in bytes
+	lastLineLen int // length of last line in characters (for correct column reporting)
+
+	tokStart int // token text start position
+	tokEnd   int // token text end  position
+
+	// Error is called for each error encountered. If no Error
+	// function is set, the error is reported to os.Stderr.
+	Error func(pos token.Pos, msg string)
+
+	// ErrorCount is incremented by one for each error encountered.
+	ErrorCount int
+
+	// tokPos is the start position of most recently scanned token; set by
+	// Scan. The Filename field is always left untouched by the Scanner.  If
+	// an error is reported (via Error) and Position is invalid, the scanner is
+	// not inside a token.
+	tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+	// even though we accept src as a byte slice, we read through an
+	// io.Reader compatible type (*bytes.Buffer), so switching to a
+	// streaming read later would be easy.
+	b := bytes.NewBuffer(src)
+	s := &Scanner{
+		buf: b,
+		src: src,
+	}
+
+	// source line numbering always starts at 1
+	s.srcPos.Line = 1
+	return s
+}
+
+// next reads the next rune from the buffered reader. It returns rune(0) if
+// an error occurs (including io.EOF).
+func (s *Scanner) next() rune {
+	ch, size, err := s.buf.ReadRune()
+	if err != nil {
+		// advance for error reporting
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		return eof
+	}
+
+	if ch == utf8.RuneError && size == 1 {
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
+	// remember last position
+	s.prevPos = s.srcPos
+
+	s.srcPos.Column++
+	s.lastCharLen = size
+	s.srcPos.Offset += size
+
+	if ch == '\n' {
+		s.srcPos.Line++
+		s.lastLineLen = s.srcPos.Column
+		s.srcPos.Column = 0
+	}
+
+	// debug
+	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+	return ch
+}
+
+// unread puts the previously read rune back and restores the source position
+func (s *Scanner) unread() {
+	if err := s.buf.UnreadRune(); err != nil {
+		panic(err) // caller error: UnreadRune fails only if nothing was read
+	}
+	s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+	peek, _, err := s.buf.ReadRune()
+	if err != nil {
+		return eof
+	}
+
+	s.buf.UnreadRune()
+	return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() already advanced the offset by one
+	// rune, but we are interested in the starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		} else if lit == "null" {
+			tok = token.NULL
+		} else {
+			s.err("illegal char")
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case ':':
+			tok = token.COLON
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				s.err("illegal char")
+			}
+		default:
+			s.err("illegal char: " + string(ch))
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+// scanNumber scans a number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	zero := ch == '0'
+	pos := s.srcPos
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+
+	// A multi-digit number must not start with a leading zero
+	if zero && pos != s.srcPos {
+		s.err("numbers cannot start with 0")
+	}
+
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the
+// next non-decimal rune, used to determine whether a fraction or exponent follows.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if ch == '\n' || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after the backslash
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans up to n digits in the given base. For example, an octal
+// escape such as \123 is scanned with scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	// we scanned all digits, put the last non digit char back
+	s.unread()
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got identifier, put back latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr by default
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal digit rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
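The scanner is pull-based: callers loop on Scan until EOF and check ErrorCount (or install an Error callback) afterwards. A short sketch (illustrative, not part of the import):

    s := scanner.New([]byte(`{"foo": [1, true]}`))
    for {
    	tok := s.Scan()
    	if tok.Type == token.EOF {
    		break
    	}
    	fmt.Printf("%s %s %q\n", tok.Pos, tok.Type, tok.Text)
    }
    if s.ErrorCount > 0 {
    	log.Fatalf("%d scan errors", s.ErrorCount)
    }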
diff --git a/json/scanner/scanner_test.go b/json/scanner/scanner_test.go
new file mode 100644
index 0000000..4bb2a86
--- /dev/null
+++ b/json/scanner/scanner_test.go
@@ -0,0 +1,362 @@
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+
+	"google3/third_party/golang/hashicorp/hcl/json/token/token"
+)
+
+var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+
+type tokenPair struct {
+	tok  token.Type
+	text string
+}
+
+var tokenLists = map[string][]tokenPair{
+	"operator": []tokenPair{
+		{token.LBRACK, "["},
+		{token.LBRACE, "{"},
+		{token.COMMA, ","},
+		{token.PERIOD, "."},
+		{token.RBRACK, "]"},
+		{token.RBRACE, "}"},
+	},
+	"bool": []tokenPair{
+		{token.BOOL, "true"},
+		{token.BOOL, "false"},
+	},
+	"string": []tokenPair{
+		{token.STRING, `" "`},
+		{token.STRING, `"a"`},
+		{token.STRING, `"本"`},
+		{token.STRING, `"${file(\"foo\")}"`},
+		{token.STRING, `"\a"`},
+		{token.STRING, `"\b"`},
+		{token.STRING, `"\f"`},
+		{token.STRING, `"\n"`},
+		{token.STRING, `"\r"`},
+		{token.STRING, `"\t"`},
+		{token.STRING, `"\v"`},
+		{token.STRING, `"\""`},
+		{token.STRING, `"\000"`},
+		{token.STRING, `"\777"`},
+		{token.STRING, `"\x00"`},
+		{token.STRING, `"\xff"`},
+		{token.STRING, `"\u0000"`},
+		{token.STRING, `"\ufA16"`},
+		{token.STRING, `"\U00000000"`},
+		{token.STRING, `"\U0000ffAB"`},
+		{token.STRING, `"` + f100 + `"`},
+	},
+	"number": []tokenPair{
+		{token.NUMBER, "0"},
+		{token.NUMBER, "1"},
+		{token.NUMBER, "9"},
+		{token.NUMBER, "42"},
+		{token.NUMBER, "1234567890"},
+		{token.NUMBER, "-0"},
+		{token.NUMBER, "-1"},
+		{token.NUMBER, "-9"},
+		{token.NUMBER, "-42"},
+		{token.NUMBER, "-1234567890"},
+	},
+	"float": []tokenPair{
+		{token.FLOAT, "0."},
+		{token.FLOAT, "1."},
+		{token.FLOAT, "42."},
+		{token.FLOAT, "01234567890."},
+		{token.FLOAT, ".0"},
+		{token.FLOAT, ".1"},
+		{token.FLOAT, ".42"},
+		{token.FLOAT, ".0123456789"},
+		{token.FLOAT, "0.0"},
+		{token.FLOAT, "1.0"},
+		{token.FLOAT, "42.0"},
+		{token.FLOAT, "01234567890.0"},
+		{token.FLOAT, "0e0"},
+		{token.FLOAT, "1e0"},
+		{token.FLOAT, "42e0"},
+		{token.FLOAT, "01234567890e0"},
+		{token.FLOAT, "0E0"},
+		{token.FLOAT, "1E0"},
+		{token.FLOAT, "42E0"},
+		{token.FLOAT, "01234567890E0"},
+		{token.FLOAT, "0e+10"},
+		{token.FLOAT, "1e-10"},
+		{token.FLOAT, "42e+10"},
+		{token.FLOAT, "01234567890e-10"},
+		{token.FLOAT, "0E+10"},
+		{token.FLOAT, "1E-10"},
+		{token.FLOAT, "42E+10"},
+		{token.FLOAT, "01234567890E-10"},
+		{token.FLOAT, "01.8e0"},
+		{token.FLOAT, "1.4e0"},
+		{token.FLOAT, "42.2e0"},
+		{token.FLOAT, "01234567890.12e0"},
+		{token.FLOAT, "0.E0"},
+		{token.FLOAT, "1.12E0"},
+		{token.FLOAT, "42.123E0"},
+		{token.FLOAT, "01234567890.213E0"},
+		{token.FLOAT, "0.2e+10"},
+		{token.FLOAT, "1.2e-10"},
+		{token.FLOAT, "42.54e+10"},
+		{token.FLOAT, "01234567890.98e-10"},
+		{token.FLOAT, "0.1E+10"},
+		{token.FLOAT, "1.1E-10"},
+		{token.FLOAT, "42.1E+10"},
+		{token.FLOAT, "01234567890.1E-10"},
+		{token.FLOAT, "-0.0"},
+		{token.FLOAT, "-1.0"},
+		{token.FLOAT, "-42.0"},
+		{token.FLOAT, "-01234567890.0"},
+		{token.FLOAT, "-0e0"},
+		{token.FLOAT, "-1e0"},
+		{token.FLOAT, "-42e0"},
+		{token.FLOAT, "-01234567890e0"},
+		{token.FLOAT, "-0E0"},
+		{token.FLOAT, "-1E0"},
+		{token.FLOAT, "-42E0"},
+		{token.FLOAT, "-01234567890E0"},
+		{token.FLOAT, "-0e+10"},
+		{token.FLOAT, "-1e-10"},
+		{token.FLOAT, "-42e+10"},
+		{token.FLOAT, "-01234567890e-10"},
+		{token.FLOAT, "-0E+10"},
+		{token.FLOAT, "-1E-10"},
+		{token.FLOAT, "-42E+10"},
+		{token.FLOAT, "-01234567890E-10"},
+		{token.FLOAT, "-01.8e0"},
+		{token.FLOAT, "-1.4e0"},
+		{token.FLOAT, "-42.2e0"},
+		{token.FLOAT, "-01234567890.12e0"},
+		{token.FLOAT, "-0.E0"},
+		{token.FLOAT, "-1.12E0"},
+		{token.FLOAT, "-42.123E0"},
+		{token.FLOAT, "-01234567890.213E0"},
+		{token.FLOAT, "-0.2e+10"},
+		{token.FLOAT, "-1.2e-10"},
+		{token.FLOAT, "-42.54e+10"},
+		{token.FLOAT, "-01234567890.98e-10"},
+		{token.FLOAT, "-0.1E+10"},
+		{token.FLOAT, "-1.1E-10"},
+		{token.FLOAT, "-42.1E+10"},
+		{token.FLOAT, "-01234567890.1E-10"},
+	},
+}
+
+var orderedTokenLists = []string{
+	"comment",
+	"operator",
+	"bool",
+	"string",
+	"number",
+	"float",
+}
+
+func TestPosition(t *testing.T) {
+	// create artificial source code
+	buf := new(bytes.Buffer)
+
+	for _, listName := range orderedTokenLists {
+		for _, ident := range tokenLists[listName] {
+			fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text)
+		}
+	}
+
+	s := New(buf.Bytes())
+
+	pos := token.Pos{"", 4, 1, 5}
+	s.Scan()
+	for _, listName := range orderedTokenLists {
+
+		for _, k := range tokenLists[listName] {
+			curPos := s.tokPos
+			// fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column)
+
+			if curPos.Offset != pos.Offset {
+				t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text)
+			}
+			if curPos.Line != pos.Line {
+				t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text)
+			}
+			if curPos.Column != pos.Column {
+				t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text)
+			}
+			pos.Offset += 4 + len(k.text) + 1     // 4 tabs + token bytes + newline
+			pos.Line += countNewlines(k.text) + 1 // each token is on a new line
+
+			s.Error = func(pos token.Pos, msg string) {
+				t.Errorf("error %q for %q", msg, k.text)
+			}
+
+			s.Scan()
+		}
+	}
+	// make sure there were no token-internal errors reported by scanner
+	if s.ErrorCount != 0 {
+		t.Errorf("%d errors", s.ErrorCount)
+	}
+}
+
+func TestComment(t *testing.T) {
+	testTokenList(t, tokenLists["comment"])
+}
+
+func TestOperator(t *testing.T) {
+	testTokenList(t, tokenLists["operator"])
+}
+
+func TestBool(t *testing.T) {
+	testTokenList(t, tokenLists["bool"])
+}
+
+func TestIdent(t *testing.T) {
+	testTokenList(t, tokenLists["ident"])
+}
+
+func TestString(t *testing.T) {
+	testTokenList(t, tokenLists["string"])
+}
+
+func TestNumber(t *testing.T) {
+	testTokenList(t, tokenLists["number"])
+}
+
+func TestFloat(t *testing.T) {
+	testTokenList(t, tokenLists["float"])
+}
+
+func TestRealExample(t *testing.T) {
+	complexReal := `
+{
+    "variable": {
+        "foo": {
+            "default": "bar",
+            "description": "bar",
+            "depends_on": ["something"]
+        }
+    }
+}`
+
+	literals := []struct {
+		tokenType token.Type
+		literal   string
+	}{
+		{token.LBRACE, `{`},
+		{token.STRING, `"variable"`},
+		{token.COLON, `:`},
+		{token.LBRACE, `{`},
+		{token.STRING, `"foo"`},
+		{token.COLON, `:`},
+		{token.LBRACE, `{`},
+		{token.STRING, `"default"`},
+		{token.COLON, `:`},
+		{token.STRING, `"bar"`},
+		{token.COMMA, `,`},
+		{token.STRING, `"description"`},
+		{token.COLON, `:`},
+		{token.STRING, `"bar"`},
+		{token.COMMA, `,`},
+		{token.STRING, `"depends_on"`},
+		{token.COLON, `:`},
+		{token.LBRACK, `[`},
+		{token.STRING, `"something"`},
+		{token.RBRACK, `]`},
+		{token.RBRACE, `}`},
+		{token.RBRACE, `}`},
+		{token.RBRACE, `}`},
+		{token.EOF, ``},
+	}
+
+	s := New([]byte(complexReal))
+	for _, l := range literals {
+		tok := s.Scan()
+		if l.tokenType != tok.Type {
+			t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
+		}
+
+		if l.literal != tok.Text {
+			t.Errorf("got: %s want %s\n", tok, l.literal)
+		}
+	}
+
+}
+
+func TestError(t *testing.T) {
+	testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
+	testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
+
+	testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING)
+	testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING)
+
+	testError(t, `01238`, "1:7", "numbers cannot start with 0", token.NUMBER)
+	testError(t, `01238123`, "1:10", "numbers cannot start with 0", token.NUMBER)
+	testError(t, `'aa'`, "1:1", "illegal char: '", token.ILLEGAL)
+
+	testError(t, `"`, "1:2", "literal not terminated", token.STRING)
+	testError(t, `"abc`, "1:5", "literal not terminated", token.STRING)
+	testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING)
+}
+
+func testError(t *testing.T, src, pos, msg string, tok token.Type) {
+	s := New([]byte(src))
+
+	errorCalled := false
+	s.Error = func(p token.Pos, m string) {
+		if !errorCalled {
+			if pos != p.String() {
+				t.Errorf("pos = %q, want %q for %q", p, pos, src)
+			}
+
+			if m != msg {
+				t.Errorf("msg = %q, want %q for %q", m, msg, src)
+			}
+			errorCalled = true
+		}
+	}
+
+	tk := s.Scan()
+	if tk.Type != tok {
+		t.Errorf("tok = %s, want %s for %q", tk, tok, src)
+	}
+	if !errorCalled {
+		t.Errorf("error handler not called for %q", src)
+	}
+	if s.ErrorCount == 0 {
+		t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
+	}
+}
+
+func testTokenList(t *testing.T, tokenList []tokenPair) {
+	// create artificial source code
+	buf := new(bytes.Buffer)
+	for _, ident := range tokenList {
+		fmt.Fprintf(buf, "%s\n", ident.text)
+	}
+
+	s := New(buf.Bytes())
+	for _, ident := range tokenList {
+		tok := s.Scan()
+		if tok.Type != ident.tok {
+			t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
+		}
+
+		if tok.Text != ident.text {
+			t.Errorf("text = %q want %q", tok.String(), ident.text)
+		}
+	}
+}
+
+func countNewlines(s string) int {
+	n := 0
+	for _, ch := range s {
+		if ch == '\n' {
+			n++
+		}
+	}
+	return n
+}
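
For orientation, the scanner these tests drive follows a plain New/Scan loop. A minimal sketch of that loop, assuming the json scanner and token packages use the same google3 import-path layout seen elsewhere in this import (the exact paths are an assumption here):

package main

import (
	"fmt"

	"google3/third_party/golang/hashicorp/hcl/json/scanner/scanner"
	"google3/third_party/golang/hashicorp/hcl/json/token/token"
)

func main() {
	// Scan a small JSON document and print every token up to and
	// including EOF.
	s := scanner.New([]byte(`{"foo": [1, "bar"]}`))
	for {
		tok := s.Scan()
		fmt.Printf("%s %s %q\n", tok.Pos, tok.Type, tok.Text)
		if tok.Type == token.EOF {
			break
		}
	}
}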
diff --git a/json/test-fixtures/array.json b/json/test-fixtures/array.json
new file mode 100644
index 0000000..e320f17
--- /dev/null
+++ b/json/test-fixtures/array.json
@@ -0,0 +1,4 @@
+{
+	"foo": [1, 2, "bar"],
+	"bar": "baz"
+}
diff --git a/json/test-fixtures/basic.json b/json/test-fixtures/basic.json
new file mode 100644
index 0000000..b54bde9
--- /dev/null
+++ b/json/test-fixtures/basic.json
@@ -0,0 +1,3 @@
+{
+	"foo": "bar"
+}
diff --git a/json/test-fixtures/object.json b/json/test-fixtures/object.json
new file mode 100644
index 0000000..72168a3
--- /dev/null
+++ b/json/test-fixtures/object.json
@@ -0,0 +1,5 @@
+{
+	"foo": {
+		"bar": [1,2]
+	}
+}
diff --git a/json/test-fixtures/types.json b/json/test-fixtures/types.json
new file mode 100644
index 0000000..9a142a6
--- /dev/null
+++ b/json/test-fixtures/types.json
@@ -0,0 +1,10 @@
+{
+	"foo": "bar",
+	"bar": 7,
+	"baz": [1,2,3],
+	"foo": -12,
+	"bar": 3.14159,
+    "foo": true,
+    "bar": false,
+    "foo": null
+}
diff --git a/json/token/position.go b/json/token/position.go
new file mode 100644
index 0000000..59c1bb7
--- /dev/null
+++ b/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Pos is valid if the line number is > 0.
+type Pos struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	line:column         valid position without file name
+//	file                invalid position with file name
+//	-                   invalid position without file name
+func (p Pos) String() string {
+	s := p.Filename
+	if p.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+	}
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+	return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+	return u.Offset < p.Offset || u.Line < p.Line
+}
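
The String forms documented above are easy to check concretely; a small sketch (the import path is an assumption, following this import's layout):

package main

import (
	"fmt"

	"google3/third_party/golang/hashicorp/hcl/json/token/token"
)

func main() {
	// Valid position with a file name: prints "config.json:3:14".
	fmt.Println(token.Pos{Filename: "config.json", Offset: 42, Line: 3, Column: 14})
	// Valid position without a file name: prints "3:14".
	fmt.Println(token.Pos{Offset: 42, Line: 3, Column: 14})
	// Invalid position (Line == 0) without a file name: prints "-".
	fmt.Println(token.Pos{})
}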
diff --git a/json/token/token.go b/json/token/token.go
new file mode 100644
index 0000000..4b590c9
--- /dev/null
+++ b/json/token/token.go
@@ -0,0 +1,118 @@
+package token
+
+import (
+	"fmt"
+	"strconv"
+
+	hcltoken "google3/third_party/golang/hashicorp/hcl/hcl/token/token"
+)
+
+// Token defines a single JSON token which can be obtained via the Scanner.
+type Token struct {
+	Type Type
+	Pos  Pos
+	Text string
+}
+
+// Type is the set of lexical tokens of the JSON configuration syntax accepted by HCL (HashiCorp Configuration Language).
+type Type int
+
+const (
+	// Special tokens
+	ILLEGAL Type = iota
+	EOF
+
+	identifier_beg
+	literal_beg
+	NUMBER // 12345
+	FLOAT  // 123.45
+	BOOL   // true,false
+	STRING // "abc"
+	NULL   // null
+	literal_end
+	identifier_end
+
+	operator_beg
+	LBRACK // [
+	LBRACE // {
+	COMMA  // ,
+	PERIOD // .
+	COLON  // :
+
+	RBRACK // ]
+	RBRACE // }
+
+	operator_end
+)
+
+var tokens = [...]string{
+	ILLEGAL: "ILLEGAL",
+
+	EOF: "EOF",
+
+	NUMBER: "NUMBER",
+	FLOAT:  "FLOAT",
+	BOOL:   "BOOL",
+	STRING: "STRING",
+	NULL:   "NULL",
+
+	LBRACK: "LBRACK",
+	LBRACE: "LBRACE",
+	COMMA:  "COMMA",
+	PERIOD: "PERIOD",
+	COLON:  "COLON",
+
+	RBRACK: "RBRACK",
+	RBRACE: "RBRACE",
+}
+
+// String returns the string corresponding to the token tok.
+func (t Type) String() string {
+	s := ""
+	if 0 <= t && t < Type(len(tokens)) {
+		s = tokens[t]
+	}
+	if s == "" {
+		s = "token(" + strconv.Itoa(int(t)) + ")"
+	}
+	return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a string representation of the token, including its
+// position, type, and literal text.
+func (t Token) String() string {
+	return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+	switch t.Type {
+	case BOOL:
+		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+	case FLOAT:
+		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+	case NULL:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+	case NUMBER:
+		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+	case STRING:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+	default:
+		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+	}
+}
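
HCLToken is the bridge from the JSON frontend into the HCL token stream: literal tokens carry their text across, JSON strings are flagged with JSON: true so they are unquoted under JSON escaping rules, and JSON null becomes an empty HCL string. A quick sketch (the import path is an assumption, following this import's layout):

package main

import (
	"fmt"

	jsontoken "google3/third_party/golang/hashicorp/hcl/json/token/token"
)

func main() {
	// A JSON string literal converts to an HCL STRING with JSON: true.
	s := jsontoken.Token{Type: jsontoken.STRING, Text: `"bar"`}
	fmt.Printf("%+v\n", s.HCLToken())

	// JSON null converts to an empty HCL string.
	n := jsontoken.Token{Type: jsontoken.NULL, Text: "null"}
	fmt.Printf("%+v\n", n.HCLToken())

	// Non-literal types such as LBRACE make HCLToken panic, per its contract.
}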
diff --git a/json/token/token_test.go b/json/token/token_test.go
new file mode 100644
index 0000000..a83fdd5
--- /dev/null
+++ b/json/token/token_test.go
@@ -0,0 +1,34 @@
+package token
+
+import (
+	"testing"
+)
+
+func TestTypeString(t *testing.T) {
+	var tokens = []struct {
+		tt  Type
+		str string
+	}{
+		{ILLEGAL, "ILLEGAL"},
+		{EOF, "EOF"},
+		{NUMBER, "NUMBER"},
+		{FLOAT, "FLOAT"},
+		{BOOL, "BOOL"},
+		{STRING, "STRING"},
+		{NULL, "NULL"},
+		{LBRACK, "LBRACK"},
+		{LBRACE, "LBRACE"},
+		{COMMA, "COMMA"},
+		{PERIOD, "PERIOD"},
+		{RBRACK, "RBRACK"},
+		{RBRACE, "RBRACE"},
+	}
+
+	for _, token := range tokens {
+		if token.tt.String() != token.str {
+			t.Errorf("want: %q got: %q\n", token.str, token.tt)
+		}
+	}
+}
diff --git a/lex.go b/lex.go
new file mode 100644
index 0000000..d9993c2
--- /dev/null
+++ b/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+	"unicode"
+	"unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+	lexModeUnknown lexModeValue = iota
+	lexModeHcl
+	lexModeJson
+)
+
+// lexMode reports whether the input should be parsed in JSON
+// mode or HCL mode, based on its first non-space rune.
+func lexMode(v []byte) lexModeValue {
+	var (
+		r      rune
+		w      int
+		offset int
+	)
+
+	for {
+		r, w = utf8.DecodeRune(v[offset:])
+		offset += w
+		if unicode.IsSpace(r) {
+			continue
+		}
+		if r == '{' {
+			return lexModeJson
+		}
+		break
+	}
+
+	return lexModeHcl
+}
diff --git a/lex_test.go b/lex_test.go
new file mode 100644
index 0000000..8062764
--- /dev/null
+++ b/lex_test.go
@@ -0,0 +1,37 @@
+package hcl
+
+import (
+	"testing"
+)
+
+func TestLexMode(t *testing.T) {
+	cases := []struct {
+		Input string
+		Mode  lexModeValue
+	}{
+		{
+			"",
+			lexModeHcl,
+		},
+		{
+			"foo",
+			lexModeHcl,
+		},
+		{
+			"{}",
+			lexModeJson,
+		},
+		{
+			"  {}",
+			lexModeJson,
+		},
+	}
+
+	for i, tc := range cases {
+		actual := lexMode([]byte(tc.Input))
+
+		if actual != tc.Mode {
+			t.Fatalf("%d: %#v", i, actual)
+		}
+	}
+}
diff --git a/parse.go b/parse.go
new file mode 100644
index 0000000..bfa08fb
--- /dev/null
+++ b/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+	"fmt"
+
+	"google3/third_party/golang/hashicorp/hcl/hcl/ast/ast"
+	hclParser "google3/third_party/golang/hashicorp/hcl/hcl/parser/parser"
+	jsonParser "google3/third_party/golang/hashicorp/hcl/json/parser/parser"
+)
+
+// ParseBytes accepts a byte slice as input and returns the AST tree.
+//
+// The input can be either JSON or HCL.
+func ParseBytes(in []byte) (*ast.File, error) {
+	return parse(in)
+}
+
+// ParseString accepts a string as input and returns the AST tree.
+func ParseString(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+	switch lexMode(in) {
+	case lexModeHcl:
+		return hclParser.Parse(in)
+	case lexModeJson:
+		return jsonParser.Parse(in)
+	}
+
+	return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
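
End to end, the same entry point accepts either syntax and returns the same AST type; the first non-space rune decides which parser runs. A minimal sketch (the root package's google3 import path is an assumption here):

package main

import (
	"fmt"
	"log"

	"google3/third_party/golang/hashicorp/hcl/hcl"
)

func main() {
	for _, src := range []string{
		`foo = "bar"`,    // does not start with '{': parsed as HCL
		`{"foo": "bar"}`, // starts with '{': parsed as JSON
	} {
		file, err := hcl.Parse(src)
		if err != nil {
			log.Fatal(err)
		}
		// The root node type is the same regardless of input syntax.
		fmt.Printf("%T\n", file.Node)
	}
}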
diff --git a/test-fixtures/assign_deep.hcl b/test-fixtures/assign_deep.hcl
new file mode 100644
index 0000000..dd3151c
--- /dev/null
+++ b/test-fixtures/assign_deep.hcl
@@ -0,0 +1,5 @@
+resource = [{
+	foo = [{
+		bar = {}
+	}]
+}]
diff --git a/test-fixtures/basic.hcl b/test-fixtures/basic.hcl
new file mode 100644
index 0000000..9499944
--- /dev/null
+++ b/test-fixtures/basic.hcl
@@ -0,0 +1,2 @@
+foo = "bar"
+bar = "${file("bing/bong.txt")}"
diff --git a/test-fixtures/basic.json b/test-fixtures/basic.json
new file mode 100644
index 0000000..7bdddc8
--- /dev/null
+++ b/test-fixtures/basic.json
@@ -0,0 +1,4 @@
+{
+	"foo": "bar",
+    "bar": "${file(\"bing/bong.txt\")}"
+}
diff --git a/test-fixtures/basic_bool.hcl b/test-fixtures/basic_bool.hcl
new file mode 100644
index 0000000..024c06c
--- /dev/null
+++ b/test-fixtures/basic_bool.hcl
@@ -0,0 +1 @@
+boolean = true
diff --git a/test-fixtures/basic_bool_int.hcl b/test-fixtures/basic_bool_int.hcl
new file mode 100644
index 0000000..9e1b45e
--- /dev/null
+++ b/test-fixtures/basic_bool_int.hcl
@@ -0,0 +1 @@
+boolean = 1
diff --git a/test-fixtures/basic_bool_string.hcl b/test-fixtures/basic_bool_string.hcl
new file mode 100644
index 0000000..4b805de
--- /dev/null
+++ b/test-fixtures/basic_bool_string.hcl
@@ -0,0 +1 @@
+boolean = "trUe"
diff --git a/test-fixtures/basic_int_string.hcl b/test-fixtures/basic_int_string.hcl
new file mode 100644
index 0000000..4e415da
--- /dev/null
+++ b/test-fixtures/basic_int_string.hcl
@@ -0,0 +1 @@
+count = "3"
diff --git a/test-fixtures/basic_squish.hcl b/test-fixtures/basic_squish.hcl
new file mode 100644
index 0000000..363697b
--- /dev/null
+++ b/test-fixtures/basic_squish.hcl
@@ -0,0 +1,3 @@
+foo="bar"
+bar="${file("bing/bong.txt")}"
+foo-bar="baz"
diff --git a/test-fixtures/block_assign.hcl b/test-fixtures/block_assign.hcl
new file mode 100644
index 0000000..ee8b06f
--- /dev/null
+++ b/test-fixtures/block_assign.hcl
@@ -0,0 +1,2 @@
+environment = "aws" {
+}
diff --git a/test-fixtures/decode_policy.hcl b/test-fixtures/decode_policy.hcl
new file mode 100644
index 0000000..5b185cc
--- /dev/null
+++ b/test-fixtures/decode_policy.hcl
@@ -0,0 +1,15 @@
+key "" {
+	policy = "read"
+}
+
+key "foo/" {
+	policy = "write"
+}
+
+key "foo/bar/" {
+	policy = "read"
+}
+
+key "foo/bar/baz" {
+	policy = "deny"
+}
diff --git a/test-fixtures/decode_policy.json b/test-fixtures/decode_policy.json
new file mode 100644
index 0000000..151864e
--- /dev/null
+++ b/test-fixtures/decode_policy.json
@@ -0,0 +1,19 @@
+{
+    "key": {
+        "": {
+            "policy": "read"
+        },
+
+        "foo/": {
+            "policy": "write"
+        },
+
+        "foo/bar/": {
+            "policy": "read"
+        },
+
+        "foo/bar/baz": {
+            "policy": "deny"
+        }
+    }
+}
diff --git a/test-fixtures/decode_tf_variable.hcl b/test-fixtures/decode_tf_variable.hcl
new file mode 100644
index 0000000..52dcaa1
--- /dev/null
+++ b/test-fixtures/decode_tf_variable.hcl
@@ -0,0 +1,10 @@
+variable "foo" {
+    default = "bar"
+    description = "bar"
+}
+
+variable "amis" {
+    default = {
+        east = "foo"
+    }
+}
diff --git a/test-fixtures/decode_tf_variable.json b/test-fixtures/decode_tf_variable.json
new file mode 100644
index 0000000..49f921e
--- /dev/null
+++ b/test-fixtures/decode_tf_variable.json
@@ -0,0 +1,14 @@
+{
+    "variable": {
+        "foo": {
+            "default": "bar",
+            "description": "bar"
+        },
+
+        "amis": {
+            "default": {
+                "east": "foo"
+            }
+        }
+    }
+}
diff --git a/test-fixtures/empty.hcl b/test-fixtures/empty.hcl
new file mode 100644
index 0000000..5be1b23
--- /dev/null
+++ b/test-fixtures/empty.hcl
@@ -0,0 +1 @@
+resource "foo" {}
diff --git a/test-fixtures/escape.hcl b/test-fixtures/escape.hcl
new file mode 100644
index 0000000..f818b15
--- /dev/null
+++ b/test-fixtures/escape.hcl
@@ -0,0 +1,6 @@
+foo = "bar\"baz\\n"
+bar = "new\nline"
+qux = "back\\slash"
+qax = "slash\\:colon"
+nested = "${HH\\:mm\\:ss}"
+nestedquotes = "${"\"stringwrappedinquotes\""}"
diff --git a/test-fixtures/escape_backslash.hcl b/test-fixtures/escape_backslash.hcl
new file mode 100644
index 0000000..bc337fb
--- /dev/null
+++ b/test-fixtures/escape_backslash.hcl
@@ -0,0 +1,5 @@
+output {
+  one = "${replace(var.sub_domain, ".", "\\.")}"
+  two = "${replace(var.sub_domain, ".", "\\\\.")}"
+  many = "${replace(var.sub_domain, ".", "\\\\\\\\.")}"
+}
diff --git a/test-fixtures/flat.hcl b/test-fixtures/flat.hcl
new file mode 100644
index 0000000..9bca551
--- /dev/null
+++ b/test-fixtures/flat.hcl
@@ -0,0 +1,2 @@
+foo = "bar"
+Key = 7
diff --git a/test-fixtures/float.hcl b/test-fixtures/float.hcl
new file mode 100644
index 0000000..edf355e
--- /dev/null
+++ b/test-fixtures/float.hcl
@@ -0,0 +1,2 @@
+a = 1.02
+b = 2
diff --git a/test-fixtures/float.json b/test-fixtures/float.json
new file mode 100644
index 0000000..5808680
--- /dev/null
+++ b/test-fixtures/float.json
@@ -0,0 +1,4 @@
+{
+	"a": 1.02,
+	"b": 2
+}
diff --git a/test-fixtures/git_crypt.hcl b/test-fixtures/git_crypt.hcl
new file mode 100644
index 0000000..f691948
--- /dev/null
+++ b/test-fixtures/git_crypt.hcl
Binary files differ
diff --git a/test-fixtures/interpolate.json b/test-fixtures/interpolate.json
new file mode 100644
index 0000000..cad0151
--- /dev/null
+++ b/test-fixtures/interpolate.json
@@ -0,0 +1,3 @@
+{
+  "default": "${replace(\"europe-west\", \"-\", \" \")}"
+}
diff --git a/test-fixtures/list_of_lists.hcl b/test-fixtures/list_of_lists.hcl
new file mode 100644
index 0000000..8af3458
--- /dev/null
+++ b/test-fixtures/list_of_lists.hcl
@@ -0,0 +1,2 @@
+foo = [["foo"], ["bar"]]
+
diff --git a/test-fixtures/list_of_maps.hcl b/test-fixtures/list_of_maps.hcl
new file mode 100644
index 0000000..985a33b
--- /dev/null
+++ b/test-fixtures/list_of_maps.hcl
@@ -0,0 +1,4 @@
+foo = [
+  {somekey1 = "someval1"},
+  {somekey2 = "someval2", someextrakey = "someextraval"},
+]
diff --git a/test-fixtures/multiline.hcl b/test-fixtures/multiline.hcl
new file mode 100644
index 0000000..f883bd7
--- /dev/null
+++ b/test-fixtures/multiline.hcl
@@ -0,0 +1,4 @@
+foo = <<EOF
+bar
+baz
+EOF
diff --git a/test-fixtures/multiline.json b/test-fixtures/multiline.json
new file mode 100644
index 0000000..93f7cc5
--- /dev/null
+++ b/test-fixtures/multiline.json
@@ -0,0 +1,3 @@
+{
+    "foo": "bar\nbaz"
+}
diff --git a/test-fixtures/multiline_bad.hcl b/test-fixtures/multiline_bad.hcl
new file mode 100644
index 0000000..4cd0f4d
--- /dev/null
+++ b/test-fixtures/multiline_bad.hcl
@@ -0,0 +1,4 @@
+foo = <EOF
+bar
+baz
+EOF
diff --git a/test-fixtures/multiline_indented.hcl b/test-fixtures/multiline_indented.hcl
new file mode 100644
index 0000000..f1d7a84
--- /dev/null
+++ b/test-fixtures/multiline_indented.hcl
@@ -0,0 +1,4 @@
+foo = <<-EOF
+        bar
+        baz
+      EOF
diff --git a/test-fixtures/multiline_literal.hcl b/test-fixtures/multiline_literal.hcl
new file mode 100644
index 0000000..f89fdfc
--- /dev/null
+++ b/test-fixtures/multiline_literal.hcl
@@ -0,0 +1,2 @@
+multiline_literal = "hello
+  world"
\ No newline at end of file
diff --git a/test-fixtures/multiline_literal_with_hil.hcl b/test-fixtures/multiline_literal_with_hil.hcl
new file mode 100644
index 0000000..b55a361
--- /dev/null
+++ b/test-fixtures/multiline_literal_with_hil.hcl
@@ -0,0 +1,2 @@
+multiline_literal_with_hil = "${hello
+  world}"
\ No newline at end of file
diff --git a/test-fixtures/multiline_no_eof.hcl b/test-fixtures/multiline_no_eof.hcl
new file mode 100644
index 0000000..faa1329
--- /dev/null
+++ b/test-fixtures/multiline_no_eof.hcl
@@ -0,0 +1,5 @@
+foo = <<EOF
+bar
+baz
+EOF
+key = "value"
diff --git a/test-fixtures/multiline_no_hanging_indent.hcl b/test-fixtures/multiline_no_hanging_indent.hcl
new file mode 100644
index 0000000..c4331ee
--- /dev/null
+++ b/test-fixtures/multiline_no_hanging_indent.hcl
@@ -0,0 +1,5 @@
+foo = <<-EOF
+  baz
+    bar
+      foo
+      EOF
diff --git a/test-fixtures/multiline_no_marker.hcl b/test-fixtures/multiline_no_marker.hcl
new file mode 100644
index 0000000..55c1739
--- /dev/null
+++ b/test-fixtures/multiline_no_marker.hcl
@@ -0,0 +1 @@
+foo = <<
diff --git a/test-fixtures/nested_block_comment.hcl b/test-fixtures/nested_block_comment.hcl
new file mode 100644
index 0000000..e827782
--- /dev/null
+++ b/test-fixtures/nested_block_comment.hcl
@@ -0,0 +1,5 @@
+/*
+foo = "bar/*"
+*/
+
+bar = "value"
diff --git a/test-fixtures/nested_provider_bad.hcl b/test-fixtures/nested_provider_bad.hcl
new file mode 100644
index 0000000..94a753a
--- /dev/null
+++ b/test-fixtures/nested_provider_bad.hcl
@@ -0,0 +1,5 @@
+resource "aws" "web" {
+  provider = "aws" {
+    region = "us-west-2"
+  }
+}
diff --git a/test-fixtures/null_strings.json b/test-fixtures/null_strings.json
new file mode 100644
index 0000000..a5b8a5a
--- /dev/null
+++ b/test-fixtures/null_strings.json
@@ -0,0 +1,7 @@
+{
+  "module": {
+    "app": {
+      "foo": null
+    }
+  }
+}
diff --git a/test-fixtures/object_list.json b/test-fixtures/object_list.json
new file mode 100644
index 0000000..73f3674
--- /dev/null
+++ b/test-fixtures/object_list.json
@@ -0,0 +1,15 @@
+{
+    "resource": {
+        "aws_instance": {
+            "db": {
+                "vpc": "foo",
+                "provisioner": [{
+                    "file": {
+                        "source": "foo",
+                        "destination": "bar"
+                    }
+                }]
+            }
+		}
+	}
+}
diff --git a/test-fixtures/object_with_bool.hcl b/test-fixtures/object_with_bool.hcl
new file mode 100644
index 0000000..e565fb4
--- /dev/null
+++ b/test-fixtures/object_with_bool.hcl
@@ -0,0 +1,6 @@
+path {
+	policy = "write"
+	permissions = {
+        "bool" = [false]
+	}
+}
diff --git a/test-fixtures/scientific.hcl b/test-fixtures/scientific.hcl
new file mode 100644
index 0000000..b9eca28
--- /dev/null
+++ b/test-fixtures/scientific.hcl
@@ -0,0 +1,6 @@
+a = 1e-10
+b = 1e+10
+c = 1e10
+d = 1.2e-10
+e = 1.2e+10
+f = 1.2e10
diff --git a/test-fixtures/scientific.json b/test-fixtures/scientific.json
new file mode 100644
index 0000000..c1fce3c
--- /dev/null
+++ b/test-fixtures/scientific.json
@@ -0,0 +1,8 @@
+{
+    "a": 1e-10,
+    "b": 1e+10,
+    "c": 1e10,
+    "d": 1.2e-10,
+    "e": 1.2e+10,
+    "f": 1.2e10
+}
diff --git a/test-fixtures/slice_expand.hcl b/test-fixtures/slice_expand.hcl
new file mode 100644
index 0000000..4d3725f
--- /dev/null
+++ b/test-fixtures/slice_expand.hcl
@@ -0,0 +1,7 @@
+service "my-service-0" {
+  key = "value"
+}
+
+service "my-service-1" {
+  key = "value"
+}
diff --git a/test-fixtures/structure.hcl b/test-fixtures/structure.hcl
new file mode 100644
index 0000000..18b6b1e
--- /dev/null
+++ b/test-fixtures/structure.hcl
@@ -0,0 +1,5 @@
+// This is a test structure for the lexer
+foo "baz" {
+	key = 7
+	foo = "bar"
+}
diff --git a/test-fixtures/structure.json b/test-fixtures/structure.json
new file mode 100644
index 0000000..30aa765
--- /dev/null
+++ b/test-fixtures/structure.json
@@ -0,0 +1,8 @@
+{
+	"foo": [{
+		"baz": [{
+			"key": 7,
+			"foo": "bar"
+		}]
+	}]
+}
diff --git a/test-fixtures/structure2.hcl b/test-fixtures/structure2.hcl
new file mode 100644
index 0000000..7577ffc
--- /dev/null
+++ b/test-fixtures/structure2.hcl
@@ -0,0 +1,9 @@
+// This is a test structure for the lexer
+foo "baz" {
+	key = 7
+	foo = "bar"
+}
+
+foo {
+	key = 7
+}
diff --git a/test-fixtures/structure2.json b/test-fixtures/structure2.json
new file mode 100644
index 0000000..c51fcf5
--- /dev/null
+++ b/test-fixtures/structure2.json
@@ -0,0 +1,10 @@
+{
+	"foo": [{
+		"baz": {
+			"key": 7,
+			"foo": "bar"
+		}
+    }, {
+		"key": 7
+	}]
+}
diff --git a/test-fixtures/structure_flat.json b/test-fixtures/structure_flat.json
new file mode 100644
index 0000000..5256db4
--- /dev/null
+++ b/test-fixtures/structure_flat.json
@@ -0,0 +1,8 @@
+{
+	"foo": {
+		"baz": {
+			"key": 7,
+			"foo": "bar"
+		}
+	}
+}
diff --git a/test-fixtures/structure_flatmap.hcl b/test-fixtures/structure_flatmap.hcl
new file mode 100644
index 0000000..fcf689e
--- /dev/null
+++ b/test-fixtures/structure_flatmap.hcl
@@ -0,0 +1,7 @@
+foo {
+	key = 7
+}
+
+foo {
+	foo = "bar"
+}
diff --git a/test-fixtures/structure_list.hcl b/test-fixtures/structure_list.hcl
new file mode 100644
index 0000000..33193ae
--- /dev/null
+++ b/test-fixtures/structure_list.hcl
@@ -0,0 +1,6 @@
+foo {
+    key = 7
+}
+foo {
+    key = 12
+}
diff --git a/test-fixtures/structure_list.json b/test-fixtures/structure_list.json
new file mode 100644
index 0000000..806a60e
--- /dev/null
+++ b/test-fixtures/structure_list.json
@@ -0,0 +1,7 @@
+{
+	"foo": [{
+        "key": 7
+    }, {
+        "key": 12
+    }]
+}
diff --git a/test-fixtures/structure_list_deep.json b/test-fixtures/structure_list_deep.json
new file mode 100644
index 0000000..46e98be
--- /dev/null
+++ b/test-fixtures/structure_list_deep.json
@@ -0,0 +1,16 @@
+{
+    "bar": {
+        "foo": {
+            "name": "terraform_example",
+            "ingress": [
+                {
+                    "from_port": 22
+                },
+                {
+                    "from_port": 80
+                }
+            ]
+        }
+    }
+}
+
diff --git a/test-fixtures/structure_list_empty.json b/test-fixtures/structure_list_empty.json
new file mode 100644
index 0000000..d99606f
--- /dev/null
+++ b/test-fixtures/structure_list_empty.json
@@ -0,0 +1,3 @@
+{
+	"foo": []
+}
diff --git a/test-fixtures/structure_multi.hcl b/test-fixtures/structure_multi.hcl
new file mode 100644
index 0000000..e45b23d
--- /dev/null
+++ b/test-fixtures/structure_multi.hcl
@@ -0,0 +1,7 @@
+foo "baz" {
+	key = 7
+}
+
+foo "bar" {
+	key = 12
+}
diff --git a/test-fixtures/structure_multi.json b/test-fixtures/structure_multi.json
new file mode 100644
index 0000000..773761a
--- /dev/null
+++ b/test-fixtures/structure_multi.json
@@ -0,0 +1,11 @@
+{
+	"foo": {
+		"baz": {
+			"key": 7
+		},
+
+		"bar": {
+			"key": 12
+		}
+	}
+}
diff --git a/test-fixtures/terraform_heroku.hcl b/test-fixtures/terraform_heroku.hcl
new file mode 100644
index 0000000..fda9241
--- /dev/null
+++ b/test-fixtures/terraform_heroku.hcl
@@ -0,0 +1,5 @@
+name = "terraform-test-app"
+
+config_vars {
+    FOO = "bar"
+}
diff --git a/test-fixtures/terraform_heroku.json b/test-fixtures/terraform_heroku.json
new file mode 100644
index 0000000..e8c6fac
--- /dev/null
+++ b/test-fixtures/terraform_heroku.json
@@ -0,0 +1,6 @@
+{
+    "name": "terraform-test-app",
+    "config_vars": {
+        "FOO": "bar"
+    }
+}
diff --git a/test-fixtures/terraform_variable_invalid.json b/test-fixtures/terraform_variable_invalid.json
new file mode 100644
index 0000000..081247e
--- /dev/null
+++ b/test-fixtures/terraform_variable_invalid.json
@@ -0,0 +1,5 @@
+{
+  "variable": {
+    "whatever": "abc123"
+  }
+}
diff --git a/test-fixtures/tfvars.hcl b/test-fixtures/tfvars.hcl
new file mode 100644
index 0000000..5f623e0
--- /dev/null
+++ b/test-fixtures/tfvars.hcl
@@ -0,0 +1,3 @@
+regularvar = "Should work"
+map.key1 = "Value"
+map.key2 = "Other value"
diff --git a/test-fixtures/unterminated_block_comment.hcl b/test-fixtures/unterminated_block_comment.hcl
new file mode 100644
index 0000000..c0ce34d
--- /dev/null
+++ b/test-fixtures/unterminated_block_comment.hcl
@@ -0,0 +1,2 @@
+/*
+Foo
diff --git a/test-fixtures/unterminated_brace.hcl b/test-fixtures/unterminated_brace.hcl
new file mode 100644
index 0000000..31b37c4
--- /dev/null
+++ b/test-fixtures/unterminated_brace.hcl
@@ -0,0 +1,2 @@
+foo "baz" {
+    bar = "baz"
diff --git a/testhelper/unix2dos.go b/testhelper/unix2dos.go
new file mode 100644
index 0000000..827ac6f
--- /dev/null
+++ b/testhelper/unix2dos.go
@@ -0,0 +1,15 @@
+package testhelper
+
+import (
+	"runtime"
+	"strings"
+)
+
+// Unix2dos converts LF line endings to CRLF when running on Windows.
+func Unix2dos(unix string) string {
+	if runtime.GOOS != "windows" {
+		return unix
+	}
+
+	return strings.Replace(unix, "\n", "\r\n", -1)
+}
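
A small sketch of the helper's effect (the import path is an assumption, following this import's layout):

package main

import (
	"fmt"

	"google3/third_party/golang/hashicorp/hcl/testhelper/testhelper"
)

func main() {
	// On Windows this prints "foo = \"bar\"\r\n"; on every other
	// platform the input passes through unchanged.
	fmt.Printf("%q\n", testhelper.Unix2dos("foo = \"bar\"\n"))
}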