New Upstream Release - jsonnet

Ready changes

Summary

Merged new upstream version: 0.19.1+ds (was: 0.18.0+ds).

Resulting package

Built on 2022-12-31T18:55 (took 8m57s)

The resulting binary packages can be installed (if you have the apt repository enabled) by running one of:

apt install -t fresh-releases jsonnet-dbgsym
apt install -t fresh-releases jsonnet
apt install -t fresh-releases libjsonnet-dev
apt install -t fresh-releases libjsonnet0-dbgsym
apt install -t fresh-releases libjsonnet0
apt install -t fresh-releases python3-jsonnet-dbgsym
apt install -t fresh-releases python3-jsonnet
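After installing, the candidate/installed version and the suite it came from can be checked with standard tooling (illustrative commands, not part of the Janitor report):

apt policy jsonnet
jsonnet --version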

Lintian Result

Diff

diff --git a/Makefile b/Makefile
index d270006..262e791 100644
--- a/Makefile
+++ b/Makefile
@@ -20,10 +20,6 @@
 CXX ?= g++
 CC ?= gcc
 
-# Emscripten -- For Jsonnet in the browser
-EMCXX ?= em++
-EMCC ?= emcc
-
 CP ?= cp
 OD ?= od
 
@@ -36,8 +32,6 @@ CXXFLAGS += -Iinclude -Ithird_party/md5 -Ithird_party/json -Ithird_party/rapidya
 CFLAGS ?= -g $(OPT) -Wall -Wextra -pedantic -std=c99 -fPIC
 CFLAGS += -Iinclude
 MAKEDEPENDFLAGS += -Iinclude -Ithird_party/md5 -Ithird_party/json -Ithird_party/rapidyaml/rapidyaml/src/ -Ithird_party/rapidyaml/rapidyaml/ext/c4core/src/
-EMCXXFLAGS = $(CXXFLAGS) --memory-init-file 0 -s DISABLE_EXCEPTION_CATCHING=0 -s INLINING_LIMIT=50 -s RESERVED_FUNCTION_POINTERS=20 -s ASSERTIONS=1 -s ALLOW_MEMORY_GROWTH=1
-EMCFLAGS = $(CFLAGS) --memory-init-file 0 -s DISABLE_EXCEPTION_CATCHING=0 -s ASSERTIONS=1 -s ALLOW_MEMORY_GROWTH=1
 LDFLAGS ?=
 
 
@@ -89,8 +83,6 @@ LIBS = \
 ALL = \
 	libjsonnet_test_snippet \
 	libjsonnet_test_file \
-	libjsonnet.js \
-	doc/js/libjsonnet.js \
 	$(BINS) \
 	$(LIBS) \
 	$(LIB_OBJ)
@@ -187,19 +179,6 @@ libjsonnet++.so.$(VERSION): $(LIB_CPP_OBJ)
 %.so: %.so.$(SOVERSION)
 	ln -sf $< $@
 
-# JavaScript build of C binding
-JS_EXPORTED_FUNCTIONS = 'EXPORTED_FUNCTIONS=["_jsonnet_make", "_jsonnet_evaluate_snippet", "_jsonnet_fmt_snippet", "_jsonnet_ext_var", "_jsonnet_ext_code", "_jsonnet_tla_var", "_jsonnet_tla_code", "_jsonnet_realloc", "_jsonnet_destroy", "_jsonnet_import_callback"]'
-
-JS_RUNTIME_METHODS = 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap", "getValue", "lengthBytesUTF8", "UTF8ToString", "setValue", "stringToUTF8", "addFunction"]'
-
-
-libjsonnet.js: $(LIB_SRC) $(ALL_HEADERS)
-	$(EMCXX) -s WASM=0 -s $(JS_EXPORTED_FUNCTIONS) -s $(JS_RUNTIME_METHODS) $(EMCXXFLAGS) $(LDFLAGS) $(LIB_SRC) -o $@
-
-# Copy javascript build to doc directory
-doc/js/libjsonnet.js: libjsonnet.js
-	$(CP) $^ $@
-
 # Tests for C binding.
 LIBJSONNET_TEST_SNIPPET_SRCS = \
 	core/libjsonnet_test_snippet.c \
diff --git a/README.md b/README.md
index e2cb65d..e0f31ec 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
 For an introduction to Jsonnet and documentation,
 [visit our website](https://jsonnet.org).
 
-This respositiory contains the original implementation. You can also try [go-jsonnet](https://github.com/google/go-jsonnet), a newer implementation which in some cases is orders of magnitude faster.
+This repository contains the original implementation. You can also try [go-jsonnet](https://github.com/google/go-jsonnet), a newer implementation which in some cases is orders of magnitude faster.
 
 Visit our [discussion forum](https://groups.google.com/g/jsonnet).
 
diff --git a/case_studies/micromanage/validate.py b/case_studies/micromanage/validate.py
index 91c716b..1519d91 100644
--- a/case_studies/micromanage/validate.py
+++ b/case_studies/micromanage/validate.py
@@ -38,8 +38,8 @@ def err(path, msg, note=None):
     raise ConfigError('%s: %s' % (render_path(path), msg), note)
 
 _KEYWORDS = {
-    'import', 'importstr', 'function', 'self', 'super', 'assert', 'if', 'then',
-    'else', 'for', 'in', 'local', 'tailstrict', 'true', 'false', 'null', 'error',
+    'import', 'importstr', 'importbin', 'function', 'self', 'super', 'assert', 'if',
+    'then', 'else', 'for', 'in', 'local', 'tailstrict', 'true', 'false', 'null', 'error',
 }
 
 def _isidentifier(name):
diff --git a/core/ast.h b/core/ast.h
index c9032c0..b0f6148 100644
--- a/core/ast.h
+++ b/core/ast.h
@@ -45,6 +45,7 @@ enum ASTType {
     AST_FUNCTION,
     AST_IMPORT,
     AST_IMPORTSTR,
+    AST_IMPORTBIN,
     AST_INDEX,
     AST_IN_SUPER,
     AST_LITERAL_BOOLEAN,
@@ -80,6 +81,7 @@ static inline std::string ASTTypeToString(ASTType type)
         case AST_FUNCTION: return "AST_FUNCTION";
         case AST_IMPORT: return "AST_IMPORT";
         case AST_IMPORTSTR: return "AST_IMPORTSTR";
+        case AST_IMPORTBIN: return "AST_IMPORTBIN";
         case AST_INDEX: return "AST_INDEX";
         case AST_IN_SUPER: return "AST_IN_SUPER";
         case AST_LITERAL_BOOLEAN: return "AST_LITERAL_BOOLEAN";
@@ -455,6 +457,15 @@ struct Importstr : public AST {
     }
 };
 
+/** Represents importbin "file". */
+struct Importbin : public AST {
+    LiteralString *file;
+    Importbin(const LocationRange &lr, const Fodder &open_fodder, LiteralString *file)
+        : AST(lr, AST_IMPORTBIN, open_fodder), file(file)
+    {
+    }
+};
+
 /** Represents both e[e] and the syntax sugar e.f.
  *
  * One of index and id will be nullptr before desugaring.  After desugaring id will be nullptr.
@@ -647,8 +658,8 @@ struct ObjectField {
 
     ObjectField(enum Kind kind, const Fodder &fodder1, const Fodder &fodder2,
                 const Fodder &fodder_l, const Fodder &fodder_r, enum Hide hide, bool super_sugar,
-                bool method_sugar, AST *expr1, const Identifier *id, const LocationRange &id_lr, 
-                const ArgParams &params, bool trailing_comma, const Fodder &op_fodder, AST *expr2, 
+                bool method_sugar, AST *expr1, const Identifier *id, const LocationRange &id_lr,
+                const ArgParams &params, bool trailing_comma, const Fodder &op_fodder, AST *expr2,
                 AST *expr3, const Fodder &comma_fodder)
         : kind(kind),
           fodder1(fodder1),
diff --git a/core/desugarer.cpp b/core/desugarer.cpp
index 9244e1a..956be9e 100644
--- a/core/desugarer.cpp
+++ b/core/desugarer.cpp
@@ -772,6 +772,12 @@ class Desugarer {
             desugar(file, obj_level);
             ast->file = dynamic_cast<LiteralString *>(file);
 
+        } else if (auto *ast = dynamic_cast<Importbin *>(ast_)) {
+            // TODO(dcunnin): Abstract this into a template function if it becomes more common.
+            AST *file = ast->file;
+            desugar(file, obj_level);
+            ast->file = dynamic_cast<LiteralString *>(file);
+
         } else if (auto *ast = dynamic_cast<InSuper *>(ast_)) {
             desugar(ast->element, obj_level);
 
diff --git a/core/formatter.cpp b/core/formatter.cpp
index 5161f2b..e8e35bb 100644
--- a/core/formatter.cpp
+++ b/core/formatter.cpp
@@ -67,7 +67,7 @@ static AST *left_recursive_deep(AST *ast_)
  * \param fodder The fodder to print
  * \param space_before Whether a space should be printed before any other output.
  * \param separate_token If the last fodder was an interstitial, whether a space should follow it.
- * \param final Whether fodder is the last one in 
+ * \param final Whether fodder is the last one in
  */
 void fodder_fill(std::ostream &o, const Fodder &fodder, bool space_before, bool separate_token, bool final)
 {
@@ -414,6 +414,10 @@ class Unparser {
             o << "importstr";
             unparse(ast->file, true);
 
+        } else if (auto *ast = dynamic_cast<const Importbin *>(ast_)) {
+            o << "importbin";
+            unparse(ast->file, true);
+
         } else if (auto *ast = dynamic_cast<const InSuper *>(ast_)) {
             unparse(ast->element, true);
             fill(ast->inFodder, true, true);
@@ -1784,6 +1788,11 @@ class FixIndentation {
             Indent new_indent = newIndent(open_fodder(ast->file), indent, column + 1);
             expr(ast->file, new_indent, true);
 
+        } else if (auto *ast = dynamic_cast<Importbin *>(ast_)) {
+            column += 9;  // importbin
+            Indent new_indent = newIndent(open_fodder(ast->file), indent, column + 1);
+            expr(ast->file, new_indent, true);
+
         } else if (auto *ast = dynamic_cast<InSuper *>(ast_)) {
             expr(ast->element, indent, space_before);
             fill(ast->inFodder, true, true, indent.lineUp);
diff --git a/core/lexer.cpp b/core/lexer.cpp
index 1e0ae72..6379e7b 100644
--- a/core/lexer.cpp
+++ b/core/lexer.cpp
@@ -190,6 +190,7 @@ static const std::map<std::string, Token::Kind> keywords = {
     {"if", Token::IF},
     {"import", Token::IMPORT},
     {"importstr", Token::IMPORTSTR},
+    {"importbin", Token::IMPORTBIN},
     {"in", Token::IN},
     {"local", Token::LOCAL},
     {"null", Token::NULL_LIT},
diff --git a/core/lexer.h b/core/lexer.h
index d260d46..c630d3e 100644
--- a/core/lexer.h
+++ b/core/lexer.h
@@ -263,6 +263,7 @@ struct Token {
         IF,
         IMPORT,
         IMPORTSTR,
+        IMPORTBIN,
         IN,
         LOCAL,
         NULL_LIT,
@@ -346,6 +347,7 @@ struct Token {
             case IF: return "if";
             case IMPORT: return "import";
             case IMPORTSTR: return "importstr";
+            case IMPORTBIN: return "importbin";
             case IN: return "in";
             case LOCAL: return "local";
             case NULL_LIT: return "null";
diff --git a/core/lexer_test.cpp b/core/lexer_test.cpp
index d49f9e5..090e351 100644
--- a/core/lexer_test.cpp
+++ b/core/lexer_test.cpp
@@ -309,6 +309,7 @@ TEST(Lexer, TestKeywords)
     testLex("if", "if", {Token(Token::Kind::IF, "if")}, "");
     testLex("import", "import", {Token(Token::Kind::IMPORT, "import")}, "");
     testLex("importstr", "importstr", {Token(Token::Kind::IMPORTSTR, "importstr")}, "");
+    testLex("importbin", "importbin", {Token(Token::Kind::IMPORTBIN, "importbin")}, "");
     testLex("in", "in", {Token(Token::Kind::IN, "in")}, "");
     testLex("local", "local", {Token(Token::Kind::LOCAL, "local")}, "");
     testLex("null", "null", {Token(Token::Kind::NULL_LIT, "null")}, "");
diff --git a/core/libjsonnet.cpp b/core/libjsonnet.cpp
index c5f0c30..2d35336 100644
--- a/core/libjsonnet.cpp
+++ b/core/libjsonnet.cpp
@@ -45,12 +45,20 @@ static void memory_panic(void)
 static char *from_string(JsonnetVm *vm, const std::string &v)
 {
     char *r = jsonnet_realloc(vm, nullptr, v.length() + 1);
-    std::strcpy(r, v.c_str());
+    std::memcpy(r, v.c_str(), v.length() + 1);
     return r;
 }
 
-static char *default_import_callback(void *ctx, const char *dir, const char *file,
-                                     char **found_here_cptr, int *success);
+static char *from_string_nonull(JsonnetVm *vm, const std::string &v, size_t *buflen)
+{
+    char *r = jsonnet_realloc(vm, nullptr, v.length());
+    std::memcpy(r, v.data(), v.length());
+    *buflen = v.length();
+    return r;
+}
+
+static int default_import_callback(void *ctx, const char *dir, const char *file,
+                                   char **found_here_cptr, char **buf, size_t *buflen);
 
 const char *jsonnet_json_extract_string(JsonnetVm *vm, const struct JsonnetJsonValue *v)
 {
@@ -229,8 +237,8 @@ static enum ImportStatus try_path(const std::string &dir, const std::string &rel
     return IMPORT_STATUS_OK;
 }
 
-static char *default_import_callback(void *ctx, const char *dir, const char *file,
-                                     char **found_here_cptr, int *success)
+static int default_import_callback(void *ctx, const char *dir, const char *file,
+                                   char **found_here_cptr, char **buf, size_t *buflen)
 {
     auto *vm = static_cast<JsonnetVm *>(ctx);
 
@@ -243,25 +251,23 @@ static char *default_import_callback(void *ctx, const char *dir, const char *fil
     // If not found, try library search path.
     while (status == IMPORT_STATUS_FILE_NOT_FOUND) {
         if (jpaths.size() == 0) {
-            *success = 0;
             const char *err = "no match locally or in the Jsonnet library paths.";
-            char *r = jsonnet_realloc(vm, nullptr, std::strlen(err) + 1);
-            std::strcpy(r, err);
-            return r;
+            *buf = from_string_nonull(vm, err, buflen);
+            return 1;  // failure
         }
         status = try_path(jpaths.back(), file, input, found_here, err_msg);
         jpaths.pop_back();
     }
 
     if (status == IMPORT_STATUS_IO_ERROR) {
-        *success = 0;
-        return from_string(vm, err_msg);
-    } else {
-        assert(status == IMPORT_STATUS_OK);
-        *success = 1;
-        *found_here_cptr = from_string(vm, found_here);
-        return from_string(vm, input);
+        *buf = from_string_nonull(vm, err_msg, buflen);
+        return 1;  // failure
     }
+
+    assert(status == IMPORT_STATUS_OK);
+    *found_here_cptr = from_string(vm, found_here);
+    *buf = from_string_nonull(vm, input, buflen);
+    return 0;  // success
 }
 
 #define TRY try {
diff --git a/core/parser.cpp b/core/parser.cpp
index 325c55a..1a4e8ff 100644
--- a/core/parser.cpp
+++ b/core/parser.cpp
@@ -584,6 +584,7 @@ class Parser {
             case Token::IN:
             case Token::IMPORT:
             case Token::IMPORTSTR:
+            case Token::IMPORTBIN:
             case Token::LOCAL:
             case Token::PAREN_R:
             case Token::SEMICOLON:
@@ -860,6 +861,23 @@ class Parser {
                 }
             }
 
+            case Token::IMPORTBIN: {
+                pop();
+                AST *body = parse(MAX_PRECEDENCE);
+                if (body->type == AST_LITERAL_STRING) {
+                    auto *lit = static_cast<LiteralString *>(body);
+                    if (lit->tokenKind == LiteralString::BLOCK) {
+                        throw StaticError(lit->location,
+                                          "Cannot use text blocks in import statements.");
+                    }
+                    return alloc->make<Importbin>(span(begin, body), begin.fodder, lit);
+                } else {
+                    std::stringstream ss;
+                    ss << "computed imports are not allowed.";
+                    throw StaticError(body->location, ss.str());
+                }
+            }
+
             case Token::LOCAL: {
                 pop();
                 Local::Binds binds;
@@ -934,7 +952,7 @@ class Parser {
                     op_precedence = APPLY_PRECEDENCE;
                     break;
 
-                default: 
+                default:
                     // This happens when we reach EOF or the terminating token of an outer context.
                     return lhs;
             }
diff --git a/core/parser_test.cpp b/core/parser_test.cpp
index 97242dc..9de3736 100644
--- a/core/parser_test.cpp
+++ b/core/parser_test.cpp
@@ -118,6 +118,7 @@ TEST(Parser, TestTuple)
 
     testParse("import 'foo.jsonnet'");
     testParse("importstr 'foo.text'");
+    testParse("importbin 'foo.text'");
 
     testParse("{a: b} + {c: d}");
     testParse("{a: b}{c: d}");
@@ -315,6 +316,9 @@ TEST(Parser, TestInvalidImport)
     testParseError("importstr (a b)",
                    R"_(test:1:14: expected token ")" but got (IDENTIFIER, "b"))_");
     testParseError("importstr (a+b)", "test:1:11-16: computed imports are not allowed.");
+    testParseError("importbin (a b)",
+                   R"_(test:1:14: expected token ")" but got (IDENTIFIER, "b"))_");
+    testParseError("importbin (a+b)", "test:1:11-16: computed imports are not allowed.");
 }
 
 TEST(Parser, TestInvalidOperator)
diff --git a/core/pass.cpp b/core/pass.cpp
index 89c0ddc..ec3ca64 100644
--- a/core/pass.cpp
+++ b/core/pass.cpp
@@ -194,6 +194,11 @@ void CompilerPass::visit(Importstr *ast)
     visit(ast->file);
 }
 
+void CompilerPass::visit(Importbin *ast)
+{
+    visit(ast->file);
+}
+
 void CompilerPass::visit(InSuper *ast)
 {
     expr(ast->element);
@@ -307,6 +312,7 @@ void CompilerPass::visitExpr(AST *&ast_)
         VISIT(ast_, AST_FUNCTION, Function);
         VISIT(ast_, AST_IMPORT, Import);
         VISIT(ast_, AST_IMPORTSTR, Importstr);
+        VISIT(ast_, AST_IMPORTBIN, Importbin);
         VISIT(ast_, AST_INDEX, Index);
         VISIT(ast_, AST_IN_SUPER, InSuper);
         VISIT(ast_, AST_LITERAL_BOOLEAN, LiteralBoolean);
@@ -367,6 +373,7 @@ void ClonePass::expr(AST *&ast_)
         CLONE(ast_, AST_FUNCTION, Function);
         CLONE(ast_, AST_IMPORT, Import);
         CLONE(ast_, AST_IMPORTSTR, Importstr);
+        CLONE(ast_, AST_IMPORTBIN, Importbin);
         CLONE(ast_, AST_INDEX, Index);
         CLONE(ast_, AST_IN_SUPER, InSuper);
         CLONE(ast_, AST_LITERAL_BOOLEAN, LiteralBoolean);
diff --git a/core/pass.h b/core/pass.h
index 230294e..76237f5 100644
--- a/core/pass.h
+++ b/core/pass.h
@@ -69,6 +69,8 @@ class CompilerPass {
 
     virtual void visit(Importstr *ast);
 
+    virtual void visit(Importbin *ast);
+
     virtual void visit(InSuper *ast);
 
     virtual void visit(Index *ast);
diff --git a/core/static_analysis.cpp b/core/static_analysis.cpp
index 3cd68cf..ddacc5b 100644
--- a/core/static_analysis.cpp
+++ b/core/static_analysis.cpp
@@ -110,6 +110,10 @@ static IdSet static_analysis(AST *ast_, bool in_object, const IdSet &vars)
         assert(dynamic_cast<Importstr *>(ast_));
         // Nothing to do.
     } break;
+    case AST_IMPORTBIN: {
+        assert(dynamic_cast<Importbin *>(ast_));
+        // Nothing to do.
+    } break;
     case AST_IN_SUPER: {
         assert(dynamic_cast<const InSuper *>(ast_));
         auto* ast = static_cast<const InSuper *>(ast_);
diff --git a/core/vm.cpp b/core/vm.cpp
index 1fd8ab9..464170d 100644
--- a/core/vm.cpp
+++ b/core/vm.cpp
@@ -516,9 +516,10 @@ class Interpreter {
     struct ImportCacheValue {
         std::string foundHere;
         std::string content;
+
         /** Thunk to store cached result of execution.
          *
-         * Null if this file was only ever successfully imported with importstr.
+         * Null if this file was only ever successfully imported with importstr/importbin.
          */
         HeapThunk *thunk;
     };
@@ -770,7 +771,7 @@ class Interpreter {
      */
     HeapThunk *import(const LocationRange &loc, const LiteralString *file)
     {
-        ImportCacheValue *input = importString(loc, file);
+        ImportCacheValue *input = importData(loc, file);
         if (input->thunk == nullptr) {
             Tokens tokens = jsonnet_lex(input->foundHere, input->content.c_str());
             AST *expr = jsonnet_parse(alloc, tokens);
@@ -783,7 +784,7 @@ class Interpreter {
         return input->thunk;
     }
 
-    /** Import a file as a string.
+    /** Import a file as a string or byte array.
      *
      * If the file has already been imported, then use that version.  This maintains
      * referential transparency in the case of writes to disk during execution.
@@ -792,7 +793,7 @@ class Interpreter {
      * \param file Path to the filename.
      * \param found_here If non-null, used to store the actual path of the file
      */
-    ImportCacheValue *importString(const LocationRange &loc, const LiteralString *file)
+    ImportCacheValue *importData(const LocationRange &loc, const LiteralString *file)
     {
         std::string dir = dir_name(loc.file);
 
@@ -803,18 +804,20 @@ class Interpreter {
         if (cached_value != nullptr)
             return cached_value;
 
-        int success = 0;
         char *found_here_cptr;
-        char *content = importCallback(importCallbackContext,
-                                       dir.c_str(),
-                                       encode_utf8(path).c_str(),
-                                       &found_here_cptr,
-                                       &success);
-
-        std::string input(content);
-        ::free(content);
-
-        if (!success) {
+        char *buf = NULL;
+        size_t buflen = 0;
+        int result = importCallback(importCallbackContext,
+                                    dir.c_str(),
+                                    encode_utf8(path).c_str(),
+                                    &found_here_cptr,
+                                    &buf,
+                                    &buflen);
+
+        std::string input(buf, buflen);
+        ::free(buf);
+
+        if (result == 1) {  // failure
             std::string epath = encode_utf8(jsonnet_string_escape(path, false));
             std::string msg = "couldn't open import \"" + epath + "\": ";
             msg += input;
@@ -1466,11 +1469,11 @@ class Interpreter {
         auto &elements = static_cast<HeapArray *>(scratch.v.h)->elements;
         while (test < str->value.size() && (maxsplits == -1 ||
                                             size_t(maxsplits) > elements.size())) {
-            if (c->value[0] == str->value[test]) {
+            if (c->value == str->value.substr(test, c->value.size())) {
                 auto *th = makeHeap<HeapThunk>(idArrayElement, nullptr, 0, nullptr);
                 elements.push_back(th);
                 th->fill(makeString(str->value.substr(start, test - start)));
-                start = test + 1;
+                start = test + c->value.size();
                 test = start;
             } else {
                 ++test;
@@ -2066,10 +2069,23 @@ class Interpreter {
 
             case AST_IMPORTSTR: {
                 const auto &ast = *static_cast<const Importstr *>(ast_);
-                const ImportCacheValue *value = importString(ast.location, ast.file);
+                const ImportCacheValue *value = importData(ast.location, ast.file);
                 scratch = makeString(decode_utf8(value->content));
             } break;
 
+            case AST_IMPORTBIN: {
+                const auto &ast = *static_cast<const Importbin *>(ast_);
+                const ImportCacheValue *value = importData(ast.location, ast.file);
+                scratch = makeArray({});
+                auto &elements = static_cast<HeapArray *>(scratch.v.h)->elements;
+                elements.reserve(value->content.size());
+                for (const auto c : value->content) {
+                    auto *th = makeHeap<HeapThunk>(idArrayElement, nullptr, 0, nullptr);
+                    elements.push_back(th);
+                    th->fill(makeNumber(uint8_t(c)));
+                }
+            } break;
+
             case AST_IN_SUPER: {
                 const auto &ast = *static_cast<const InSuper *>(ast_);
                 stack.newFrame(FRAME_IN_SUPER_ELEMENT, ast_);
diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt
index eb7686c..40082fb 100644
--- a/cpp/CMakeLists.txt
+++ b/cpp/CMakeLists.txt
@@ -15,7 +15,7 @@ add_dependencies(libjsonnet++ jsonnet)
 # CMake prepends CMAKE_SHARED_LIBRARY_PREFIX to shared libraries, so without
 # this step the output would be |liblibjsonnet|.
 set_target_properties(libjsonnet++ PROPERTIES OUTPUT_NAME jsonnet++
-	VERSION     "0.18.0"
+	VERSION     "0.19.1"
 	SOVERSION   "0"
 	PUBLIC_HEADER "${LIB_HEADER}")
 install(TARGETS libjsonnet++
diff --git a/cpp/libjsonnet++.cpp b/cpp/libjsonnet++.cpp
index 085a3d1..4f0ebf3 100644
--- a/cpp/libjsonnet++.cpp
+++ b/cpp/libjsonnet++.cpp
@@ -94,12 +94,14 @@ bool Jsonnet::evaluateFile(const std::string& filename, std::string* output)
         return false;
     }
     int error = 0;
-    const char* jsonnet_output = ::jsonnet_evaluate_file(vm_, filename.c_str(), &error);
+    char* jsonnet_output = ::jsonnet_evaluate_file(vm_, filename.c_str(), &error);
     if (error != 0) {
         last_error_.assign(jsonnet_output);
+        jsonnet_realloc(vm_, jsonnet_output, 0);
         return false;
     }
     output->assign(jsonnet_output);
+    jsonnet_realloc(vm_, jsonnet_output, 0);
     return true;
 }
 
@@ -110,13 +112,15 @@ bool Jsonnet::evaluateSnippet(const std::string& filename, const std::string& sn
         return false;
     }
     int error = 0;
-    const char* jsonnet_output =
+    char* jsonnet_output =
         ::jsonnet_evaluate_snippet(vm_, filename.c_str(), snippet.c_str(), &error);
     if (error != 0) {
         last_error_.assign(jsonnet_output);
+        jsonnet_realloc(vm_, jsonnet_output, 0);
         return false;
     }
     output->assign(jsonnet_output);
+    jsonnet_realloc(vm_, jsonnet_output, 0);
     return true;
 }
 
@@ -146,12 +150,14 @@ bool Jsonnet::evaluateFileMulti(const std::string& filename,
         return false;
     }
     int error = 0;
-    const char* jsonnet_output = ::jsonnet_evaluate_file_multi(vm_, filename.c_str(), &error);
+    char* jsonnet_output = ::jsonnet_evaluate_file_multi(vm_, filename.c_str(), &error);
     if (error != 0) {
         last_error_.assign(jsonnet_output);
+        jsonnet_realloc(vm_, jsonnet_output, 0);
         return false;
     }
     parseMultiOutput(jsonnet_output, outputs);
+    jsonnet_realloc(vm_, jsonnet_output, 0);
     return true;
 }
 
@@ -162,13 +168,15 @@ bool Jsonnet::evaluateSnippetMulti(const std::string& filename, const std::strin
         return false;
     }
     int error = 0;
-    const char* jsonnet_output =
+    char* jsonnet_output =
         ::jsonnet_evaluate_snippet_multi(vm_, filename.c_str(), snippet.c_str(), &error);
     if (error != 0) {
         last_error_.assign(jsonnet_output);
+        jsonnet_realloc(vm_, jsonnet_output, 0);
         return false;
     }
     parseMultiOutput(jsonnet_output, outputs);
+    jsonnet_realloc(vm_, jsonnet_output, 0);
     return true;
 }
 
diff --git a/debian/changelog b/debian/changelog
index 14c19a5..e83315d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+jsonnet (0.19.1+ds-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Sat, 31 Dec 2022 18:47:34 -0000
+
 jsonnet (0.18.0+ds-2) unstable; urgency=medium
 
   * fix FTBFS on several release architectures (Closes: #1013310)
diff --git a/debian/patches/0001-fix-FTBFS-on-several-release-architectures.patch b/debian/patches/0001-fix-FTBFS-on-several-release-architectures.patch
index c3fad06..8335e16 100644
--- a/debian/patches/0001-fix-FTBFS-on-several-release-architectures.patch
+++ b/debian/patches/0001-fix-FTBFS-on-several-release-architectures.patch
@@ -7,10 +7,10 @@ Last-Update: 2022-06-30
  .../ext/c4core/src/c4/ext/debugbreak/debugbreak.h  | 83 +++++++++++-----------
  1 file changed, 43 insertions(+), 40 deletions(-)
 
-diff --git a/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/ext/debugbreak/debugbreak.h b/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/ext/debugbreak/debugbreak.h
-index f570bf8..e91121d 100644
---- a/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/ext/debugbreak/debugbreak.h
-+++ b/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/ext/debugbreak/debugbreak.h
+Index: jsonnet.git/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/ext/debugbreak/debugbreak.h
+===================================================================
+--- jsonnet.git.orig/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/ext/debugbreak/debugbreak.h
++++ jsonnet.git/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/ext/debugbreak/debugbreak.h
 @@ -1,4 +1,4 @@
 -/* Copyright (c) 2011-2015, Scott Tsai
 +/* Copyright (c) 2011-2018, Scott Tsai
@@ -60,7 +60,7 @@ index f570bf8..e91121d 100644
  /* FIXME: handle __THUMB_INTERWORK__ */
  __attribute__((gnu_inline, always_inline))
  __inline__ static void trap_instruction(void)
-@@ -69,30 +62,35 @@ __inline__ static void trap_instruction(void)
+@@ -69,30 +62,35 @@ __inline__ static void trap_instruction(
  #endif
  
  	/* Known problem:
@@ -105,7 +105,7 @@ index f570bf8..e91121d 100644
  __attribute__((gnu_inline, always_inline))
  __inline__ static void trap_instruction(void)
  {
-@@ -101,34 +99,39 @@ __inline__ static void trap_instruction(void)
+@@ -101,34 +99,39 @@ __inline__ static void trap_instruction(
  	__asm__ volatile(".inst 0xd4200000");
  }
  #else
diff --git a/debian/patches/0002-fix-compile-error-in-armel.patch b/debian/patches/0002-fix-compile-error-in-armel.patch
index c012767..3ffb479 100644
--- a/debian/patches/0002-fix-compile-error-in-armel.patch
+++ b/debian/patches/0002-fix-compile-error-in-armel.patch
@@ -7,10 +7,10 @@ Last-Update: 2022-06-30
  third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/cpu.hpp | 1 +
  1 file changed, 1 insertion(+)
 
-diff --git a/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/cpu.hpp b/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/cpu.hpp
-index ca3f788..2c006b8 100644
---- a/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/cpu.hpp
-+++ b/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/cpu.hpp
+Index: jsonnet.git/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/cpu.hpp
+===================================================================
+--- jsonnet.git.orig/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/cpu.hpp
++++ jsonnet.git/third_party/rapidyaml/rapidyaml/ext/c4core/src/c4/cpu.hpp
 @@ -59,6 +59,7 @@
          || (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM >= 6)
  #           define C4_CPU_ARMV6
diff --git a/doc/_stdlib_gen/stdlib-content.jsonnet b/doc/_stdlib_gen/stdlib-content.jsonnet
index d6c228e..47b200a 100644
--- a/doc/_stdlib_gen/stdlib-content.jsonnet
+++ b/doc/_stdlib_gen/stdlib-content.jsonnet
@@ -66,7 +66,7 @@ local html = import 'html.libsonnet';
         {
           name: 'get',
           params: ['o', 'f', 'default=null', 'inc_hidden=true'],
-          availableSince: 'upcoming',
+          availableSince: '0.18.0',
           description: |||
             Returns the object's field if it exists or default value otherwise.
             <code>inc_hidden</code> controls whether to include hidden fields.
@@ -339,36 +339,58 @@ local html = import 'html.libsonnet';
         {
           name: 'split',
           params: ['str', 'c'],
-          description: |||
-            Split the string <code>str</code> into an array of strings, divided by the single character
-            <code>c</code>.
-          |||,
+          description: [
+            html.p({}, |||
+              Split the string <code>str</code> into an array of strings, divided by the string
+              <code>c</code>.
+            |||),
+            html.p({}, |||
+              Note: Versions up to and including 0.18.0 require <code>c</code> to be a single character.
+            |||),
+          ],
           examples: [
             {
-              input: @'std.split("foo/bar", "/")',
-              output: std.split('foo/bar', '/'),
+              input: @'std.split("foo/_bar", "/_")',
+              output: std.split('foo/_bar', '/_'),
             },
             {
-              input: @'std.split("/foo/", "/")',
-              output: std.split('/foo/', '/'),
+              input: @'std.split("/_foo/_bar", "/_")',
+              output: std.split('/_foo/_bar', '/_'),
             },
           ],
         },
         {
           name: 'splitLimit',
           params: ['str', 'c', 'maxsplits'],
-          description: |||
-            As std.split(str, c) but will stop after <code>maxsplits</code> splits, thereby the largest
-            array it will return has length <code>maxsplits + 1</code>.  A limit of -1 means unlimited.
-          |||,
+          description: [
+            html.p({}, |||
+              As <code>std.split(str, c)</code> but will stop after <code>maxsplits</code> splits, thereby the largest
+              array it will return has length <code>maxsplits + 1</code>. A limit of <code>-1</code> means unlimited.
+            |||),
+            html.p({}, |||
+              Note: Versions up to and including 0.18.0 require <code>c</code> to be a single character.
+            |||),
+          ],
           examples: [
             {
-              input: @'std.splitLimit("foo/bar", "/", 1)',
-              output: std.splitLimit('foo/bar', '/', 1),
+              input: @'std.splitLimit("foo/_bar", "/_", 1)',
+              output: std.splitLimit('foo/_bar', '/_', 1),
             },
             {
-              input: @'std.splitLimit("/foo/bar", "/", 1)',
-              output: std.splitLimit('/foo/bar', '/', 1),
+              input: @'std.splitLimit("/_foo/_bar", "/_", 1)',
+              output: std.splitLimit('/_foo/_bar', '/_', 1),
+            },
+          ],
+        },
+        {
+          name: 'splitLimitR',
+          params: ['str', 'c', 'maxsplits'],
+          availableSince: 'v0.19.0',
+          description: 'As <code>std.splitLimit(str, c, maxsplits)</code> but will split from right to left.',
+          examples: [
+            {
+              input: @'std.splitLimitR("/_foo/_bar", "/_", 1)',
+              output: std.splitLimitR('/_foo/_bar', '/_', 1),
             },
           ],
         },
@@ -565,7 +587,7 @@ local html = import 'html.libsonnet';
         },
         {
           name: 'parseYaml',
-          availableSince: 'x.y.z',
+          availableSince: '0.18.0',
           params: ['str'],
           description: |||
             Parses a YAML string. This is provided as a "best-effort" mechanism and should not be relied on to provide
@@ -759,7 +781,7 @@ local html = import 'html.libsonnet';
         {
           name: 'manifestJsonMinified',
           params: ['value'],
-          availableSince: 'upcoming',
+          availableSince: '0.18.0',
           description: |||
                 Convert the given object to a minified JSON form. Under the covers,
                 it calls <code>std.manifestJsonEx:')</code>:
@@ -943,7 +965,7 @@ local html = import 'html.libsonnet';
         {
           name: 'manifestTomlEx',
           params: ['toml', 'indent'],
-          availableSince: 'upcoming',
+          availableSince: '0.18.0',
           description: |||
             Convert the given object to a TOML form. <code>indent</code> is a string containing
             one or more whitespaces that are used for indentation:
@@ -1244,6 +1266,32 @@ local html = import 'html.libsonnet';
             |||,
           ]),
         },
+        {
+          name: 'all',
+          params: ['arr'],
+          availableSince: 'v0.19.0',
+          description: html.paragraphs([
+            |||
+              Return true if all elements of <code>arr</code> is true, false otherwise. <code>all([])</code> evaluates to true.
+            |||,
+            |||
+              It's an error if 1) <code>arr</code> is not an array, or 2) <code>arr</code> contains non-boolean values.
+            |||,
+          ]),
+        },
+        {
+          name: 'any',
+          params: ['arr'],
+          availableSince: 'v0.19.0',
+          description: html.paragraphs([
+            |||
+              Return true if any element of <code>arr</code> is true, false otherwise. <code>any([])</code> evaluates to false.
+            |||,
+            |||
+              It's an error if 1) <code>arr</code> is not an array, or 2) <code>arr</code> contains non-boolean values.
+            |||,
+          ]),
+        },
       ],
     },
     {
diff --git a/doc/articles/kubernetes.html b/doc/articles/kubernetes.html
index b2f8daa..88c2968 100644
--- a/doc/articles/kubernetes.html
+++ b/doc/articles/kubernetes.html
@@ -63,28 +63,29 @@ title: Kubernetes
           example</a> from the Jsonnet repo.
         </li>
         <li>
-          <a href="https://github.com/bitnami/kubecfg">Kubecfg</a> (whose name may sound familiar to
+          <a href="https://github.com/kubecfg/kubecfg">Kubecfg</a> (whose name may sound familiar to
           current or ex-Googlers) is an unopiniated tool for evaluating Jsonnet and
           pushing the results to Kubernetes.  It comes with a <a
           href="https://github.com/bitnami-labs/kube-libsonnet">useful template library</a>.  See
           this <a href="https://engineering.bitnami.com/articles/an-example-of-real-kubernetes-bitnami.html">blog post</a>.
         </li>
         <li>
-          <a href="https://ksonnet.io">Ksonnet</a>, by <a href="https://heptio.com">Heptio</a> is a
-          fork of kubecfg which uses a Jsonnet library that is automatically generated from
-          Kubernetes API specifications.  This forms a framework in which Kubernetes objects can be
-          built using high-level composable descriptions.
+          <a href="https://tanka.dev">Tanka</a>, by <a href="https://grafana.com">Grafana Labs</a>
+          is the spiritual successor of Ksonnet. It extends jsonnet with native functions
+          to import Kustomize manifests and Helm charts, making it an allround tool to
+          work with Kubernetes.
+        </li>
+        <li>
+          <a href="https://github.com/jsonnet-libs/k8s">jsonnet-libs/k8s</a>
+          generator produces over 30 Jsonnet Kubernetes libraries and counting, with most
+          notably the <a href="https://jsonnet-libs.github.io/k8s-libsonnet/">k8s-libsonnet</a> library
+          as the succesor of ksonnet-lib.
         </li>
         <li>
           <a href="https://github.com/deepmind/kapitan">Kapitan</a> by <a
           href="https://deepmind.com">Deepmind</a> is another tool for driving Kubernetes with
           Jsonnet and textual templating.
         </li>
-        <li>
-          <a href="https://github.com/coreos/kpm">Kpm</a> is a similar deployment tool (now
-          deprecated) by <a href="https://coreos.com">CoreOS</a> that uses a mixture of Jsonnet and
-          Jinja templating.
-        </li>
         <li>
           <a href="https://www.box.com">Box</a> have <a
           href="https://blog.box.com/blog/kubernetes-box-microservices-maximum-velocity/">blogged</a>
diff --git a/doc/index.html b/doc/index.html
index 6a6efb1..35ab580 100644
--- a/doc/index.html
+++ b/doc/index.html
@@ -303,11 +303,7 @@ title: The Data Templating Language
           <img class=user-logo src=img/users/daydream-logo.png></a>
         -->
         <a href='https://deepmind.com'><img class=user-logo src=img/users/deepmind.png></a>
-        <!--
         <a href='https://grafana.com'><img class=user-logo src=img/users/grafana.svg></a>
-        -->
-        <a href='https://www.heptio.com'><img class=user-logo src=img/users/heptio.jpg></a>
-        <a href='https://kausal.co'><img class=user-logo src=img/users/kausal.svg></a>
         <!--
         <a href='https://www.spinnaker.io'><img class=user-logo src=img/users/spinnaker.png></a>
         -->
diff --git a/doc/js/codemirror-mode-jsonnet.js b/doc/js/codemirror-mode-jsonnet.js
index a3e87f5..b0b826b 100644
--- a/doc/js/codemirror-mode-jsonnet.js
+++ b/doc/js/codemirror-mode-jsonnet.js
@@ -118,28 +118,28 @@
         }
 
         // Imports (including the strings after them).
-        if (stream.match(/import(?:str)?\s*"/)) {
+        if (stream.match(/import(?:str|bin)?\s*"/)) {
           state.importString = true;
           state.stringSingle = false;
           state.stringRaw = false;
           return "meta";
         }
 
-        if (stream.match(/import(?:str)?\s*'/)) {
+        if (stream.match(/import(?:str|bin)?\s*'/)) {
           state.importString = true;
           state.stringSingle = true;
           state.stringRaw = false;
           return "meta";
         }
 
-        if (stream.match(/import(?:str)?\s*@"/)) {
+        if (stream.match(/import(?:str|bin)?\s*@"/)) {
           state.importString = true;
           state.stringSingle = false;
           state.stringRaw = true;
           return "meta";
         }
 
-        if (stream.match(/import(?:str)?\s*@'/)) {
+        if (stream.match(/import(?:str|bin)?\s*@'/)) {
           state.importString = true;
           state.stringSingle = true;
           state.stringRaw = true;
diff --git a/doc/learning/getting_started.html b/doc/learning/getting_started.html
index ac4bfdd..778b0a6 100644
--- a/doc/learning/getting_started.html
+++ b/doc/learning/getting_started.html
@@ -202,7 +202,7 @@ local
         separated by <tt>---</tt> and terminated with <tt>...</tt>.  Any YAML parser <i>should</i>
         interpret this as a YAML stream (people have reported broken parsers, so try it out first).
       </p>
-      <pre>$ jsonnet -y . yaml_stream.jsonnet
+      <pre>$ jsonnet -y yaml_stream.jsonnet
 ---
 {
    "x": 1,
diff --git a/doc/learning/tools.html b/doc/learning/tools.html
index 9adf6ee..d8b59e8 100644
--- a/doc/learning/tools.html
+++ b/doc/learning/tools.html
@@ -27,16 +27,20 @@ title: Tooling
           Editor integration for syntax highlighting and other features
           <ul>
             <li>
-              <a href="https://heptio.com">Heptio</a> maintain a <a
-              href="https://github.com/heptio/vscode-jsonnet">vscode</a> extension with quite
-              sophisticated program analysis
-              </li>
+              <a href="https://github.com/grafana/jsonnet-language-server">Language
+                  Server Protocol (LSP) server</a>
+              (maintained by <a href="https://grafana.com">Grafana Labs</a>)
+            </li>
+            <li>
+              <a href="https://github.com/grafana/vscode-jsonnet">vscode</a>
+              (maintained by <a href="https://grafana.com">Grafana Labs</a>)
+            </li>
             <li>
               <a href="https://github.com/google/vim-jsonnet">Vim</a>
             </li>
             <li>
-              <a href="https://github.com/google/codemirror-mode-jsonnet">Codemirror,</a> the editor
-              widget used by this website
+              <a href="https://github.com/google/codemirror-mode-jsonnet">Codemirror,</a>
+              the editor widget used by this website
             </li>
             <li>
               <a href="https://github.com/google/language-jsonnet">Atom</a>
@@ -46,11 +50,16 @@ title: Tooling
             </li>
             <li>
               <a href="https://databricks.com/">Databricks</a> maintain an <a
-              href="https://github.com/databricks/intellij-jsonnet">IntelliJ</a> plugin with jump to
-              imported files and local variables.
+                 href="https://github.com/databricks/intellij-jsonnet">IntelliJ</a>
+              plugin with jump to imported files and local variables
+            </li>
+            <li>
+              Another <a href="https://github.com/zzehring/intellij-jsonnet">Intellij</a>
+              with support for Grafana's LSP server
             </li>
             <li>
-              An <a href="https://github.com/tminor/jsonnet-mode">Emacs mode</a> is also available.
+              An <a href="https://github.com/tminor/jsonnet-mode">Emacs mode</a> is also
+              available
             </li>
           </ul>
         </li>
diff --git a/doc/learning/tutorial.html b/doc/learning/tutorial.html
index 52c39da..3ac5520 100644
--- a/doc/learning/tutorial.html
+++ b/doc/learning/tutorial.html
@@ -122,7 +122,7 @@ title: Tutorial
         <li><code>self</code> refers to the current object.</li>
         <li><code>$</code> refers to the outer-most object.</li>
         <li><code>['foo']</code> looks up a field.</li>
-        <li><code>.f</code> can be used if the field name is an identifier.</li>
+        <li><code>.foo</code> can be used if the field name is an identifier.</li>
         <li><code>[10]</code> looks up an array element.</li>
         <li>Arbitrarily long paths are allowed.</li>
         <li>Array slices like <code>arr[10:20:2]</code> are allowed, like in Python.</li>
@@ -363,6 +363,7 @@ title: Tutorial
         <li>Files designed for import by convention end with <tt>.libsonnet</tt></li>
         <li>Raw JSON can be imported this way too.</li>
         <li>The <code>importstr</code> construct is for verbatim UTF-8 text.</li>
+        <li>The <code>importbin</code> construct is for verbatim binary data.</li>
       </ul>
       <p>
         Usually, imported Jsonnet content is stashed in a top-level local variable.  This
@@ -740,7 +741,7 @@ title: Tutorial
       </p>
       <ul>
         <li>Hidden fields, defined with <code>::</code>, which do not appear in generated JSON</li>
-        <li>The <code>super</code> keyword, which has its usual meaning</li>
+        <li>The <code>super</code> keyword is used to access fields on a base object.</li>
         <li>The <code>+:</code> field syntax for overriding deeply nested fields</li>
       </ul>
     </div>
diff --git a/doc/ref/bindings.html b/doc/ref/bindings.html
index 59b7fdd..48b5eb6 100644
--- a/doc/ref/bindings.html
+++ b/doc/ref/bindings.html
@@ -26,7 +26,7 @@ title: Bindings
     </div>
     <div style="clear: both"></div>
   </div>
-</div>  
+</div>
 
 
 <div class="hgroup">
@@ -61,7 +61,7 @@ title: Bindings
     </div>
     <div style="clear: both"></div>
   </div>
-</div>  
+</div>
 
 
 <div class="hgroup">
@@ -141,7 +141,7 @@ title: Bindings
       </ul>
       <p>
         The argument <tt>import_callback</tt> can be used to pass a callable, to trap the Jsonnet
-        <code>import</code> and <code>importstr</code> constructs.  This allows, e.g., reading files
+        <code>import</code>, <code>importstr</code>, and <code>importbin</code> constructs.  This allows, e.g., reading files
         out of archives or implementing library search paths.  The argument <tt>native_callback</tt>
         is used to allow execution of arbitrary Python code via <code>std.native(...)</code>.  This
         is useful so Jsonnet code can access pure functions in the Python ecosystem, such as
@@ -173,11 +173,11 @@ json_str = _jsonnet.evaluate_snippet(
     ext_vars={'OTHER_NAME': 'Bob'})
 
 json_obj = json.loads(json_str)
-for person_id, person in json_obj.iteritems():
-  print '%s is %s, greeted by "%s"' % (
+for person_id, person in json_obj.items():
+  print('%s is %s, greeted by "%s"' % (
       person_id,
       person['name'],
-      person['welcome'])</pre>
+      person['welcome']))</pre>
     <div style="clear: both"></div>
   </div>
 </div>
diff --git a/doc/ref/language.html.md b/doc/ref/language.html.md
index c1a2001..74a848c 100644
--- a/doc/ref/language.html.md
+++ b/doc/ref/language.html.md
@@ -271,7 +271,7 @@ The `if` and `for` components can be freely mixed. It almost always makes sense
     [x, y]
     for x in std.range(1, 10)
     if x % 3 == 0
-    for y in std.range(1, 10),
+    for y in std.range(1, 10)
     if y % 2 == 0
   ]
 ```
@@ -536,7 +536,7 @@ The field separators `+:`, `+::`, `+:::` are relevant for nested objects (which
 
 It is not an error to have `+:` without a matching field on the left hand side. In such cases the right hand side field is used directly. E.g. both `{ foo +: { bar: "baz" } }` and `{} + { foo +: { bar: "baz" } }` evaluate to `{ foo: { bar: "baz" } }`.
 
-In all cases, these field separators are just syntax sugar and the same results can be achieved with `super`. More precisely `{ a +: b }` is equivalent to `{ a: if "a" in super then super.a + b else b }` (and similarly `+::` and `+:::`). 
+In all cases, these field separators are just syntax sugar and the same results can be achieved with `super`. More precisely `{ a +: b }` is equivalent to `{ a: if "a" in super then super.a + b else b }` (and similarly `+::` and `+:::`).
 
 
 #### Object Equality
@@ -604,7 +604,7 @@ It is possible to pass data from the environment, but only explicitly, by using
 
 Before using any of the methods described below, it is worth considering if a fully self-contained setup is viable.
 
-In this style, the configuration is a set of `.jsonnet` and `.libsonnet` files. Every output file corresponds to a `.jsonnet` file and all shared setup is in `.libsonnet` files. Any raw data can be placed in additional files and imported using `importstr`. Usually, all code and data is committed to a repository.
+In this style, the configuration is a set of `.jsonnet` and `.libsonnet` files. Every output file corresponds to a `.jsonnet` file and all shared setup is in `.libsonnet` files. Any raw data can be placed in additional files and imported using `importstr` or `importbin`. Usually, all code and data is committed to a repository.
 Sometimes the generated configuration is also checked in, which makes it easy to spot unintended changes.
 
 Sometimes it is not practical, though. For example if the produced configuration needs to contain secrets, which you do not want to commit alongside code, it is necessary to pass them from outside.
diff --git a/doc/ref/spec.html b/doc/ref/spec.html
index e66a4b1..526e01c 100644
--- a/doc/ref/spec.html
+++ b/doc/ref/spec.html
@@ -57,6 +57,7 @@ div.rules {
     \newcommand{\ifnoelse}[2]{\texttt{if }#1\texttt{ then }#2}
     \newcommand{\import}[1]{\texttt{import }#1}
     \newcommand{\importstr}[1]{\texttt{importstr }#1}
+    \newcommand{\importbin}[1]{\texttt{importbin }#1}
     \newcommand{\index}[2]{#1\texttt{[}#2\texttt{]}}
     \newcommand{\local}[2]{\texttt{local }#1\texttt{ ; }#2}
     \newcommand{\null}{\texttt{null}}
@@ -134,7 +135,7 @@ div.rules {
             Some identifiers are reserved as keywords, thus are not in the set <i>id</i>:
             <code>assert</code> <code>else</code> <code>error</code> <code>false</code>
             <code>for</code> <code>function</code> <code>if</code> <code>import</code>
-            <code>importstr</code> <code>in</code> <code>local</code> <code>null</code>
+            <code>importstr</code> <code>importbin</code> <code>in</code> <code>local</code> <code>null</code>
             <code>tailstrict</code> <code>then</code> <code>self</code> <code>super</code>
             <code>true</code>.
           </p>
@@ -490,6 +491,16 @@ div.rules {
           </td>
         </tr>
 
+        <!-- importbin -->
+        <tr>
+          <td></td>
+          <td> | </td>
+          <td>
+            <code>importbin</code>
+            <i>string</i>
+          </td>
+        </tr>
+
         <!-- error -->
         <tr>
           <td></td>
@@ -789,7 +800,7 @@ div.rules {
 
       <p>
         Everything is left associative.  In the case of <code>assert</code>, <code>error</code>,
-        <code>function</code>, <code>if</code>, <code>import</code>, <code>importstr</code>, and
+        <code>function</code>, <code>if</code>, <code>import</code>, <code>importstr</code>, <code>importbin</code>, and
         <code>local</code>, ambiguity is resolved by consuming as many tokens as possible on the
         right hand side.  For example the parentheses are redundant in <code>local x = 1; (x +
         x)</code>.  All remaining ambiguities are resolved according to the following decreasing
@@ -892,12 +903,12 @@ div.rules {
         notation to make the presentation more clear.
       </p>
       <p>
-        Also removed in the core language are <code>import</code> and <code>importstr</code>.  The
+        Also removed in the core language are <code>import</code>, <code>importstr</code>, and <code>importbin</code>.  The
         semantics of these constructs is that they are replaced with either the contents of the
         file, or an error construct if importing failed (e.g. due to I/O errors).  In the first
         case, the file is parsed, desugared, and subject to static checking before it can be
-        substituted.  In the latter case, the file is substituted in the form of a string, so it
-        merely needs to contain valid UTF-8.
+        substituted.  In the case of <code>importstr</code>, the file is substituted in the form of a string, so it
+        merely needs to contain valid UTF-8.  For <code>importbin</code>, the file is substituted as an array of integer numbers between 0 and 255 inclusive.
       </p>
       <p>
         A given Jsonnet file can be recursively imported via <code>import</code>. Thus, the
@@ -1701,6 +1712,14 @@ div.rules {
             }
           \]
         </div>
+        <div class="sequent-rule">
+          \[
+            \rule{chk-importbin} {
+            } {
+              Γ ⊢ \importbin{s}
+            }
+          \]
+        </div>
         <div class="sequent-rule">
           \[
             \rule{chk-error} {
diff --git a/doc/ref/stdlib.html b/doc/ref/stdlib.html
index d7c1b97..07a834a 100644
--- a/doc/ref/stdlib.html
+++ b/doc/ref/stdlib.html
@@ -428,7 +428,7 @@ title: Standard Library
 <div class="hgroup">
   <div class="hgroup-inline">
     <div class="panel">
-      <h4 id="std.clamp">
+      <h4 id="clamp">
         std.clamp(x, minVal, maxVal)
       </h4>
     </div>
@@ -785,14 +785,17 @@ title: Standard Library
   <div class="hgroup-inline">
     <div class="panel">
       <p>
-        Split the string <code>str</code> into an array of strings, divided by the single character
+        Split the string <code>str</code> into an array of strings, divided by the string
         <code>c</code>.
       </p>
       <p>
-        Example: <code>std.split("foo/bar", "/")</code> yields <code>[ "foo", "bar" ]</code>.
+        Note: Versions up to and including 0.18.0 require <code>c</code> to be a single character.
       </p>
       <p>
-        Example: <code>std.split("/foo/", "/")</code> yields <code>[ "", "foo", "" ]</code>.
+        Example: <code>std.split("foo/_bar", "/_")</code> yields <code>[ "foo", "bar" ]</code>.
+      </p>
+      <p>
+        Example: <code>std.split("/_foo/_bar", "/_")</code> yields <code>[ "", "foo", "bar" ]</code>.
       </p>
     </div>
     <div style="clear: both"></div>
@@ -813,14 +816,46 @@ title: Standard Library
   <div class="hgroup-inline">
     <div class="panel">
       <p>
-        As std.split(str, c) but will stop after <code>maxsplits</code> splits, thereby the largest
-        array it will return has length <code>maxsplits + 1</code>.  A limit of -1 means unlimited.
+        As <code>std.split(str, c)</code> but will stop after <code>maxsplits</code> splits, thereby the largest
+        array it will return has length <code>maxsplits + 1</code>. A limit of <code>-1</code> means unlimited.
+      </p>
+      <p>
+        Note: Versions up to and including 0.18.0 require <code>c</code> to be a single character.
+      </p>
+      <p>
+        Example: <code>std.splitLimit("foo/_bar", "/_", 1)</code> yields <code>[ "foo", "bar" ]</code>.
+      </p>
+      <p>
+        Example: <code>std.splitLimit("/_foo/_bar", "/_", 1)</code> yields <code>[ "", "foo/_bar" ]</code>.
+      </p>
+    </div>
+    <div style="clear: both"></div>
+  </div>
+</div>
+
+<div class="hgroup">
+  <div class="hgroup-inline">
+    <div class="panel">
+      <h4 id="splitLimitR">
+        std.splitLimitR(str, c, maxsplits)
+      </h4>
+    </div>
+    <div style="clear: both"></div>
+  </div>
+</div>
+<div class="hgroup">
+  <div class="hgroup-inline">
+    <div class="panel">
+      <p>
+        <em>
+          Available since version v0.19.0.
+        </em>
       </p>
       <p>
-        Example: <code>std.splitLimit("foo/bar", "/", 1)</code> yields <code>[ "foo", "bar" ]</code>.
+        As <code>std.splitLimit(str, c, maxsplits)</code> but will split from right to left.
       </p>
       <p>
-        Example: <code>std.splitLimit("/foo/bar", "/", 1)</code> yields <code>[ "", "foo/bar" ]</code>.
+        Example: <code>std.splitLimitR("/_foo/_bar", "/_", 1)</code> yields <code>[ "/_foo", "bar" ]</code>.
       </p>
     </div>
     <div style="clear: both"></div>
@@ -1189,7 +1224,7 @@ title: Standard Library
     <div class="panel">
       <p>
         <em>
-          Available since version x.y.z.
+          Available since version 0.18.0.
         </em>
       </p>
       <p>
@@ -2218,6 +2253,66 @@ e = {"f1": False, "f2": 42}</pre>
   </div>
 </div>
 
+<div class="hgroup">
+  <div class="hgroup-inline">
+    <div class="panel">
+      <h4 id="all">
+        std.all(arr)
+      </h4>
+    </div>
+    <div style="clear: both"></div>
+  </div>
+</div>
+<div class="hgroup">
+  <div class="hgroup-inline">
+    <div class="panel">
+      <p>
+        <em>
+          Available since version v0.19.0.
+        </em>
+      </p>
+      <p>
+        Return true if all elements of <code>arr</code> is true, false otherwise. <code>all([])</code> evaluates to true.
+      </p>
+      <p>
+        It's an error if 1) <code>arr</code> is not an array, or 2) <code>arr</code> contains non-boolean values.
+      </p>
+      
+    </div>
+    <div style="clear: both"></div>
+  </div>
+</div>
+
+<div class="hgroup">
+  <div class="hgroup-inline">
+    <div class="panel">
+      <h4 id="any">
+        std.any(arr)
+      </h4>
+    </div>
+    <div style="clear: both"></div>
+  </div>
+</div>
+<div class="hgroup">
+  <div class="hgroup-inline">
+    <div class="panel">
+      <p>
+        <em>
+          Available since version v0.19.0.
+        </em>
+      </p>
+      <p>
+        Return true if any element of <code>arr</code> is true, false otherwise. <code>any([])</code> evaluates to false.
+      </p>
+      <p>
+        It's an error if 1) <code>arr</code> is not an array, or 2) <code>arr</code> contains non-boolean values.
+      </p>
+      
+    </div>
+    <div style="clear: both"></div>
+  </div>
+</div>
+
 
 <div class="hgroup">
   <div class="hgroup-inline">
diff --git a/include/libjsonnet++.h b/include/libjsonnet++.h
index b744b31..ed14f31 100644
--- a/include/libjsonnet++.h
+++ b/include/libjsonnet++.h
@@ -17,6 +17,7 @@ limitations under the License.
 #ifndef CPP_JSONNET_H_
 #define CPP_JSONNET_H_
 
+#include <cstdint>
 #include <cstring>
 #include <functional>
 #include <map>
diff --git a/include/libjsonnet.h b/include/libjsonnet.h
index 94746b6..cf9e56d 100644
--- a/include/libjsonnet.h
+++ b/include/libjsonnet.h
@@ -31,7 +31,7 @@ limitations under the License.
  *
  * If this isn't the same as jsonnet_version() then you've got a mismatched binary / header.
  */
-#define LIB_JSONNET_VERSION "v0.18.0"
+#define LIB_JSONNET_VERSION "v0.19.1"
 
 /** Return the version string of the Jsonnet interpreter.  Conforms to semantic versioning
  * https://semver.org/ If this does not match LIB_JSONNET_VERSION then there is a mismatch between
@@ -69,10 +69,12 @@ void jsonnet_string_output(struct JsonnetVm *vm, int v);
  *     process's CWD.  This is necessary so that imports from the content of the imported file can
  *     be resolved correctly.  Allocate memory with jsonnet_realloc.  Only use when *success = 1.
  * \param success Set this byref param to 1 to indicate success and 0 for failure.
- * \returns The content of the imported file, or an error message.
+ * \param buf Set this byref param to the content of the imported file, or an error message.  Allocate memory with jsonnet_realloc.  Do not include a null terminator byte.
+ * \param buflen Set this byref param to the length of the data returned in buf.
+ * \returns 0 to indicate success and 1 for failure.  On success, the content is in *buf.  On failure, an error message is in *buf.
  */
-typedef char *JsonnetImportCallback(void *ctx, const char *base, const char *rel, char **found_here,
-                                    int *success);
+typedef int JsonnetImportCallback(void *ctx, const char *base, const char *rel,
+                                  char **found_here, char **buf, size_t *buflen);
 
 /** An opaque type which can only be utilized via the jsonnet_json_* family of functions.
  */
diff --git a/python/BUILD b/python/BUILD
index 517c588..0c47741 100644
--- a/python/BUILD
+++ b/python/BUILD
@@ -19,7 +19,10 @@ py_library(
 py_test(
     name = "_jsonnet_test",
     srcs = ["_jsonnet_test.py"],
-    data = ["test.jsonnet"],
+    data = [
+        "testdata/basic_check.jsonnet",
+        "testdata/trivial.jsonnet",
+    ],
     python_version = "PY3",
     deps = [":_jsonnet"],
 )
diff --git a/python/_jsonnet.c b/python/_jsonnet.c
index 32e1aaa..d509b72 100644
--- a/python/_jsonnet.c
+++ b/python/_jsonnet.c
@@ -23,8 +23,17 @@ limitations under the License.
 
 static char *jsonnet_str(struct JsonnetVm *vm, const char *str)
 {
-    char *out = jsonnet_realloc(vm, NULL, strlen(str) + 1);
-    memcpy(out, str, strlen(str) + 1);
+    size_t size = strlen(str) + 1;
+    char *out = jsonnet_realloc(vm, NULL, size);
+    memcpy(out, str, size);
+    return out;
+}
+
+static char *jsonnet_str_nonull(struct JsonnetVm *vm, const char *str, size_t *buflen)
+{
+    *buflen = strlen(str);
+    char *out = jsonnet_realloc(vm, NULL, *buflen);
+    memcpy(out, str, *buflen);
     return out;
 }
 
@@ -138,7 +147,6 @@ static struct JsonnetJsonValue *cpython_native_callback(
     void *ctx_, const struct JsonnetJsonValue * const *argv, int *succ)
 {
     const struct NativeCtx *ctx = ctx_;
-    int i;
 
     PyEval_RestoreThread(*ctx->py_thread);
 
@@ -147,7 +155,7 @@ static struct JsonnetJsonValue *cpython_native_callback(
 
     // Populate python function args.
     arglist = PyTuple_New(ctx->argc);
-    for (i = 0; i < ctx->argc; ++i) {
+    for (size_t i = 0; i < ctx->argc; ++i) {
         double d;
         const char *param_str = jsonnet_json_extract_string(ctx->vm, argv[i]);
         int param_null = jsonnet_json_extract_null(ctx->vm, argv[i]);
@@ -209,12 +217,12 @@ struct ImportCtx {
     PyObject *callback;
 };
 
-static char *cpython_import_callback(void *ctx_, const char *base, const char *rel,
-                                     char **found_here, int *success)
+static int cpython_import_callback(void *ctx_, const char *base, const char *rel,
+                                   char **found_here, char **buf, size_t *buflen)
 {
     const struct ImportCtx *ctx = ctx_;
     PyObject *arglist, *result;
-    char *out;
+    int success;
 
     PyEval_RestoreThread(*ctx->py_thread);
     arglist = Py_BuildValue("(s, s)", base, rel);
@@ -223,47 +231,49 @@ static char *cpython_import_callback(void *ctx_, const char *base, const char *r
 
     if (result == NULL) {
         // Get string from exception
-        char *out = jsonnet_str(ctx->vm, exc_to_str());
-        *success = 0;
+        *buf = jsonnet_str_nonull(ctx->vm, exc_to_str(), buflen);
         PyErr_Clear();
         *ctx->py_thread = PyEval_SaveThread();
-        return out;
+        return 1; // failure
     }
 
     if (!PyTuple_Check(result)) {
-        out = jsonnet_str(ctx->vm, "import_callback did not return a tuple");
-        *success = 0;
+        *buf = jsonnet_str_nonull(ctx->vm, "import_callback did not return a tuple", buflen);
+        success = 0;
     } else if (PyTuple_Size(result) != 2) {
-        out = jsonnet_str(ctx->vm, "import_callback did not return a tuple (size 2)");
-        *success = 0;
+        *buf = jsonnet_str_nonull(ctx->vm, "import_callback did not return a tuple (size 2)", buflen);
+        success = 0;
     } else {
         PyObject *file_name = PyTuple_GetItem(result, 0);
         PyObject *file_content = PyTuple_GetItem(result, 1);
 #if PY_MAJOR_VERSION >= 3
-        if (!PyUnicode_Check(file_name) || !PyUnicode_Check(file_content)) {
+        if (!PyUnicode_Check(file_name) || !PyBytes_Check(file_content)) {
 #else
-        if (!PyString_Check(file_name) || !PyString_Check(file_content)) {
+        if (!PyString_Check(file_name) || !PyBytes_Check(file_content)) {
 #endif
-            out = jsonnet_str(ctx->vm, "import_callback did not return a pair of strings");
-            *success = 0;
+            *buf = jsonnet_str_nonull(ctx->vm, "import_callback did not return (string, bytes). Since 0.19.0 imports should be returned as bytes instead of as a string.  You may want to call .encode() on your string.", buflen);
+            success = 0;
         } else {
+            char *content_buf;
+            ssize_t content_len;
 #if PY_MAJOR_VERSION >= 3
             const char *found_here_cstr = PyUnicode_AsUTF8(file_name);
-            const char *content_cstr = PyUnicode_AsUTF8(file_content);
 #else
             const char *found_here_cstr = PyString_AsString(file_name);
-            const char *content_cstr = PyString_AsString(file_content);
 #endif
+            PyBytes_AsStringAndSize(file_content, &content_buf, &content_len);
             *found_here = jsonnet_str(ctx->vm, found_here_cstr);
-            out = jsonnet_str(ctx->vm, content_cstr);
-            *success = 1;
+            *buflen = content_len;
+            *buf = jsonnet_realloc(ctx->vm, NULL, *buflen);
+            memcpy(*buf, content_buf, *buflen);
+            success = 1;
         }
     }
 
     Py_DECREF(result);
     *ctx->py_thread = PyEval_SaveThread();
 
-    return out;
+    return success ? 0 : 1;
 }
 
 static PyObject *handle_result(struct JsonnetVm *vm, char *out, int error)
@@ -644,7 +654,7 @@ static PyMethodDef module_methods[] = {
 };
 
 #if PY_MAJOR_VERSION >= 3
-static struct PyModuleDef _jsonnet =
+static struct PyModuleDef _module =
 {
     PyModuleDef_HEAD_INIT,
     "_jsonnet",
@@ -655,11 +665,20 @@ static struct PyModuleDef _jsonnet =
 
 PyMODINIT_FUNC PyInit__jsonnet(void)
 {
-    return PyModule_Create(&_jsonnet);
+    PyObject *module = PyModule_Create(&_module);
+    PyObject *version_str = PyUnicode_FromString(LIB_JSONNET_VERSION);
+    if (PyModule_AddObject(module, "version", PyUnicode_FromString(LIB_JSONNET_VERSION)) < 0) {
+      Py_XDECREF(version_str);
+    }
+    return module;
 }
 #else
 PyMODINIT_FUNC init_jsonnet(void)
 {
-    Py_InitModule3("_jsonnet", module_methods, "A Python interface to Jsonnet.");
+    PyObject *module = Py_InitModule3("_jsonnet", module_methods, "A Python interface to Jsonnet.");
+    PyObject *version_str = PyUnicode_FromString(LIB_JSONNET_VERSION);
+    if (PyModule_AddObject(module, "version", PyString_FromString(LIB_JSONNET_VERSION)) < 0) {
+      Py_XDECREF(version_str);
+    }
 }
 #endif
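
One user-visible addition in the binding change above is a module-level version attribute; a quick way to confirm which library the extension was built against:

# The rebuilt extension now exposes the bundled library version string.
import _jsonnet
print(_jsonnet.version)   # expected to print "v0.19.1" for this package
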
diff --git a/python/_jsonnet_test.py b/python/_jsonnet_test.py
index 838fe83..0e159ba 100644
--- a/python/_jsonnet_test.py
+++ b/python/_jsonnet_test.py
@@ -13,13 +13,18 @@
 # limitations under the License.
 
 import os
+import sys
 import unittest
 
 import _jsonnet
 
 
-#  Returns content if worked, None if file not found, or throws an exception
-def try_path(dir, rel):
+# Returns (full_path, contents) if the file was successfully retrieved,
+# (full_path, None) if file not found, or throws an exception when the path
+# is invalid or an IO error occurred.
+# It caches both hits and misses in the `cache` dict. Exceptions
+# do not need to be cached, because they abort the computation anyway.
+def try_path_cached(cache, dir, rel):
     if not rel:
         raise RuntimeError('Got invalid filename (empty string).')
     if rel[0] == '/':
@@ -28,19 +33,24 @@ def try_path(dir, rel):
         full_path = dir + rel
     if full_path[-1] == '/':
         raise RuntimeError('Attempted to import a directory')
-
-    if not os.path.isfile(full_path):
-        return full_path, None
-    with open(full_path) as f:
-        return full_path, f.read()
-
-
-def import_callback(dir, rel):
-    full_path, content = try_path(dir, rel)
+    if full_path not in cache:
+        if not os.path.isfile(full_path):
+            cache[full_path] = None
+        else:
+            with open(full_path) as f:
+                cache[full_path] = f.read().encode()
+    return full_path, cache[full_path]
+
+def import_callback_encode(dir, rel):
+    cache = {}
+    full_path, content = try_path_cached(cache, dir, rel)
     if content:
         return full_path, content
     raise RuntimeError('File not found')
 
+def import_callback_empty_file_encode(dir, rel):
+    return dir, b''
+
 
 # Test native extensions
 def concat(a, b):
@@ -68,30 +78,106 @@ native_callbacks = {
 
 class JsonnetTests(unittest.TestCase):
     def setUp(self):
-        self.input_filename = os.path.join(
-            os.path.dirname(__file__),
-            "test.jsonnet",
-        )
-        self.expected_str = "true\n"
+        base_dir = os.path.join(os.path.dirname(__file__), "testdata")
+        self.input_filename = os.path.join(base_dir, "basic_check.jsonnet")
+        self.trivial_filename = os.path.join(base_dir, "trivial.jsonnet")
+        self.test_filename = os.path.join(base_dir, "test.jsonnet")
         with open(self.input_filename, "r") as infile:
             self.input_snippet = infile.read()
 
-    def test_evaluate_file(self):
+    def test_version(self):
+        self.assertEqual(type(_jsonnet.version), str)
+
+    def test_evaluate_file_encode(self):
         json_str = _jsonnet.evaluate_file(
             self.input_filename,
-            import_callback=import_callback,
+            import_callback=import_callback_encode,
+            native_callbacks=native_callbacks,
+        )
+        self.assertEqual(json_str, "true\n")
+
+    def test_evaluate_snippet_encode(self):
+        json_str = _jsonnet.evaluate_snippet(
+            self.test_filename,
+            self.input_snippet,
+            import_callback=import_callback_encode,
             native_callbacks=native_callbacks,
         )
-        self.assertEqual(json_str, self.expected_str)
+        self.assertEqual(json_str, "true\n")
 
-    def test_evaluate_snippet(self):
+    def test_evaluate_snippet_encode(self):
         json_str = _jsonnet.evaluate_snippet(
-            "snippet",
+            self.test_filename,
             self.input_snippet,
-            import_callback=import_callback,
+            import_callback=import_callback_encode,
+            native_callbacks=native_callbacks,
+        )
+        self.assertEqual(json_str, "true\n")
+
+    def test_import_encode(self):
+        json_str = _jsonnet.evaluate_snippet(
+            self.test_filename,
+            "import 'trivial.jsonnet'",
+            import_callback=import_callback_encode,
+            native_callbacks=native_callbacks,
+        )
+        self.assertEqual(json_str, "42\n")
+
+    def test_import_no_eol_encode(self):
+        json_str = _jsonnet.evaluate_snippet(
+            self.test_filename,
+            "import 'trivial_no_eol.jsonnet'",
+            import_callback=import_callback_encode,
+            native_callbacks=native_callbacks,
+        )
+        self.assertEqual(json_str, "42\n")
+
+    def test_import_binary_encode(self):
+        json_str = _jsonnet.evaluate_snippet(
+            self.test_filename,
+            "importbin 'binary123.bin'",
+            import_callback=import_callback_encode,
+            native_callbacks=native_callbacks,
+        )
+        self.assertEqual(json_str, "[\n   1,\n   2,\n   3\n]\n")
+
+    def test_import_binary_sentinel_encode(self):
+        json_str = _jsonnet.evaluate_snippet(
+            self.test_filename,
+            "importbin 'binary1230123.bin'",
+            import_callback=import_callback_encode,
+            native_callbacks=native_callbacks,
+        )
+        self.assertEqual(json_str, "[\n   1,\n   2,\n   3,\n   0,\n   1,\n   2,\n   3\n]\n")
+
+    def test_import_str_empty_file_encode(self):
+        json_str = _jsonnet.evaluate_snippet(
+            self.test_filename,
+            "importstr 'binary123.bin'",
+            import_callback=import_callback_empty_file_encode,
+            native_callbacks=native_callbacks,
+        )
+        self.assertEqual(json_str, "\"\"\n")
+
+    def test_import_binary_empty_file_encode(self):
+        json_str = _jsonnet.evaluate_snippet(
+            self.test_filename,
+            "importbin 'binary123.bin'",
+            import_callback=import_callback_empty_file_encode,
+            native_callbacks=native_callbacks,
+        )
+        self.assertEqual(json_str, "[ ]\n")
+
+    def test_double_import(self):
+        json_str = _jsonnet.evaluate_snippet(
+            self.test_filename,
+            "local x = import 'trivial.jsonnet';\n" +
+            "local y = import 'trivial.jsonnet';\n" +
+            "x + y",
+            import_callback=import_callback_encode,
             native_callbacks=native_callbacks,
         )
-        self.assertEqual(json_str, self.expected_str)
+        self.assertEqual(json_str, "84\n")
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/test.jsonnet b/python/testdata/basic_check.jsonnet
similarity index 100%
rename from python/test.jsonnet
rename to python/testdata/basic_check.jsonnet
diff --git a/python/testdata/binary123.bin b/python/testdata/binary123.bin
new file mode 100644
index 0000000..aed2973
--- /dev/null
+++ b/python/testdata/binary123.bin
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/python/testdata/binary1230123.bin b/python/testdata/binary1230123.bin
new file mode 100644
index 0000000..33de9f2
Binary files /dev/null and b/python/testdata/binary1230123.bin differ
diff --git a/python/testdata/trivial.jsonnet b/python/testdata/trivial.jsonnet
new file mode 100644
index 0000000..199eb08
--- /dev/null
+++ b/python/testdata/trivial.jsonnet
@@ -0,0 +1,2 @@
+// used for testing imports
+42
diff --git a/python/testdata/trivial_no_eol.jsonnet b/python/testdata/trivial_no_eol.jsonnet
new file mode 100644
index 0000000..463181b
--- /dev/null
+++ b/python/testdata/trivial_no_eol.jsonnet
@@ -0,0 +1,2 @@
+// used for testing imports
+42
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 0a2748a..ab6b431 100644
--- a/setup.py
+++ b/setup.py
@@ -78,6 +78,9 @@ jsonnet_ext = Extension(
 
 setup(name='jsonnet',
       url='https://jsonnet.org',
+      project_urls={
+        'Source': 'https://github.com/google/jsonnet',
+      },
       description='Python bindings for Jsonnet - The data templating language ',
       license="Apache License 2.0",
       author='David Cunningham',
diff --git a/stdlib/std.jsonnet b/stdlib/std.jsonnet
index 7e965af..4615fa6 100644
--- a/stdlib/std.jsonnet
+++ b/stdlib/std.jsonnet
@@ -110,26 +110,38 @@ limitations under the License.
     parse_nat(str, 16),
 
   split(str, c)::
-    assert std.isString(str) : 'std.split first parameter should be a string, got ' + std.type(str);
-    assert std.isString(c) : 'std.split second parameter should be a string, got ' + std.type(c);
-    assert std.length(c) == 1 : 'std.split second parameter should have length 1, got ' + std.length(c);
+    assert std.isString(str) : 'std.split first parameter must be a String, got ' + std.type(str);
+    assert std.isString(c) : 'std.split second parameter must be a String, got ' + std.type(c);
+    assert std.length(c) >= 1 : 'std.split second parameter must have length 1 or greater, got ' + std.length(c);
     std.splitLimit(str, c, -1),
 
   splitLimit(str, c, maxsplits)::
-    assert std.isString(str) : 'std.splitLimit first parameter should be a string, got ' + std.type(str);
-    assert std.isString(c) : 'std.splitLimit second parameter should be a string, got ' + std.type(c);
-    assert std.length(c) == 1 : 'std.splitLimit second parameter should have length 1, got ' + std.length(c);
-    assert std.isNumber(maxsplits) : 'std.splitLimit third parameter should be a number, got ' + std.type(maxsplits);
-    local aux(str, delim, i, arr, v) =
-      local c = str[i];
-      local i2 = i + 1;
-      if i >= std.length(str) then
-        arr + [v]
-      else if c == delim && (maxsplits == -1 || std.length(arr) < maxsplits) then
-        aux(str, delim, i2, arr + [v], '') tailstrict
+    assert std.isString(str) : 'std.splitLimit first parameter must be a String, got ' + std.type(str);
+    assert std.isString(c) : 'std.splitLimit second parameter must be a String, got ' + std.type(c);
+    assert std.length(c) >= 1 : 'std.splitLimit second parameter must have length 1 or greater, got ' + std.length(c);
+    assert std.isNumber(maxsplits) : 'std.splitLimit third parameter must be a Number, got ' + std.type(maxsplits);
+    local strLen = std.length(str);
+    local cLen = std.length(c);
+    local aux(idx, ret, val) =
+      if idx >= strLen then
+        ret + [val]
+      else if str[idx : idx + cLen : 1] == c &&
+              (maxsplits == -1 || std.length(ret) < maxsplits) then
+        aux(idx + cLen, ret + [val], '')
       else
-        aux(str, delim, i2, arr, v + c) tailstrict;
-    aux(str, c, 0, [], ''),
+        aux(idx + 1, ret, val + str[idx]);
+    aux(0, [], ''),
+
+  splitLimitR(str, c, maxsplits)::
+    assert std.isString(str) : 'std.splitLimitR first parameter must be a String, got ' + std.type(str);
+    assert std.isString(c) : 'std.splitLimitR second parameter must be a String, got ' + std.type(c);
+    assert std.length(c) >= 1 : 'std.splitLimitR second parameter must have length 1 or greater, got ' + std.length(c);
+    assert std.isNumber(maxsplits) : 'std.splitLimitR third parameter must be a Number, got ' + std.type(maxsplits);
+    if maxsplits == -1 then
+      std.splitLimit(str, c, -1)
+    else
+      local revStr(str) = std.join('', std.reverse(std.stringChars(str)));
+      std.map(function(e) revStr(e), std.reverse(std.splitLimit(revStr(str), revStr(c), maxsplits))),
 
   strReplace(str, from, to)::
     assert std.isString(str);
@@ -881,7 +893,7 @@ limitations under the License.
       escapeKeyToml(key) =
         local bare_allowed = std.set(std.stringChars("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-"));
         if std.setUnion(std.set(std.stringChars(key)), bare_allowed) == bare_allowed then key else escapeStringToml(key),
-      isTableArray(v) = std.isArray(v) && std.length(v) > 0 && std.foldl(function(a, b) a && std.isObject(b), v, true),
+      isTableArray(v) = std.isArray(v) && std.length(v) > 0 && std.all(std.map(std.isObject, v)),
       isSection(v) = std.isObject(v) || isTableArray(v),
       renderValue(v, indexedPath, inline, cindent) =
         if v == true then
@@ -1298,7 +1310,7 @@ limitations under the License.
   base64(input)::
     local bytes =
       if std.isString(input) then
-        std.map(function(c) std.codepoint(c), input)
+        std.map(std.codepoint, input)
       else
         input;
 
@@ -1335,7 +1347,7 @@ limitations under the License.
           base64_table[(arr[i + 2] & 63)];
         aux(arr, i + 3, r + str) tailstrict;
 
-    local sanity = std.foldl(function(r, a) r && (a < 256), bytes, true);
+    local sanity = std.all([a < 256 for a in bytes]);
     if !sanity then
       error 'Can only base64 encode strings / arrays of single bytes.'
     else
@@ -1365,7 +1377,7 @@ limitations under the License.
 
   base64Decode(str)::
     local bytes = std.base64DecodeBytes(str);
-    std.join('', std.map(function(b) std.char(b), bytes)),
+    std.join('', std.map(std.char, bytes)),
 
   reverse(arr)::
     local l = std.length(arr);
@@ -1594,19 +1606,49 @@ limitations under the License.
     else
       std.filter(function(i) arr[i] == value, std.range(0, std.length(arr) - 1)),
 
+  all(arr)::
+    assert std.isArray(arr) : 'all() parameter should be an array, got ' + std.type(arr);
+    local arrLen = std.length(arr);
+    local aux(idx) =
+      if idx >= arrLen then
+        true
+      else
+        local e = arr[idx];
+        assert std.isBoolean(e) : std.format('element "%s" of type %s is not a boolean', [e, std.type(e)]);
+        if !e then
+          false
+        else
+          aux(idx + 1) tailstrict;
+    aux(0),
+
+  any(arr)::
+    assert std.isArray(arr) : 'any() parameter should be an array, got ' + std.type(arr);
+    local arrLen = std.length(arr);
+    local aux(idx) =
+      if idx >= arrLen then
+        false
+      else
+        local e = arr[idx];
+        assert std.isBoolean(e) : std.format('element "%s" of type %s is not a boolean', [e, std.type(e)]);
+        if e then
+          true
+        else
+          aux(idx + 1) tailstrict;
+    aux(0),
+
   // Three way comparison.
   // TODO(sbarzowski): consider exposing and documenting it properly
   __compare(v1, v2)::
-      local t1 = std.type(v1), t2 = std.type(v2);
-      if t1 != t2 then
-        error "Comparison requires matching types. Got " + t1 + " and " + t2
-      else if t1 == "array" then
-        std.__compare_array(v1, v2)
-      else if t1 == "function" || t1 == "object" || t1 == "bool" then
-        error "Values of type " + t1 + " are not comparable."
-      else if v1 < v2 then -1
-      else if v1 > v2 then 1
-      else 0,
+    local t1 = std.type(v1), t2 = std.type(v2);
+    if t1 != t2 then
+      error 'Comparison requires matching types. Got ' + t1 + ' and ' + t2
+    else if t1 == 'array' then
+      std.__compare_array(v1, v2)
+    else if t1 == 'function' || t1 == 'object' || t1 == 'boolean' then
+      error 'Values of type ' + t1 + ' are not comparable.'
+    else if v1 < v2 then -1
+    else if v1 > v2 then 1
+    else 0,
 
   __compare_array(arr1, arr2)::
     local len1 = std.length(arr1), len2 = std.length(arr2);
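
The new std.splitLimitR above is implemented by reversing the string and delimiter, splitting from the left with the same limit, then reversing the pieces back. A quick Python analogue of that trick, included only to illustrate the idea (it is not part of the release):

# Python analogue of the reverse/split/reverse trick used by splitLimitR:
# reverse the string and delimiter, split from the left with the same
# limit, then reverse both the order and the content of the pieces.
def split_limit_r(s, c, maxsplits):
    if maxsplits == -1:
        return s.split(c)
    rev = lambda x: x[::-1]
    parts = rev(s).split(rev(c), maxsplits)
    return [rev(p) for p in reversed(parts)]

assert split_limit_r('/_foo/_bar', '/_', 1) == ['/_foo', 'bar']
assert split_limit_r('/foo/', '/', -1) == ['', 'foo', '']
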
diff --git a/test_cmd/fmt_help.golden.stdout b/test_cmd/fmt_help.golden.stdout
index fc51499..c18ae0f 100644
--- a/test_cmd/fmt_help.golden.stdout
+++ b/test_cmd/fmt_help.golden.stdout
@@ -1,4 +1,4 @@
-Jsonnet reformatter v0.18.0
+Jsonnet reformatter v0.19.1
 
 jsonnetfmt {<option>} { <filename> }
 
diff --git a/test_cmd/fmt_help.golden.stdout.golang b/test_cmd/fmt_help.golden.stdout.golang
index c29f44b..3eda169 100644
--- a/test_cmd/fmt_help.golden.stdout.golang
+++ b/test_cmd/fmt_help.golden.stdout.golang
@@ -1,4 +1,4 @@
-Jsonnet reformatter v0.18.0
+Jsonnet reformatter v0.19.1
 
 jsonnetfmt {<option>} { <filename> }
 
diff --git a/test_cmd/fmt_no_args.golden.stderr b/test_cmd/fmt_no_args.golden.stderr
index 3ccfbce..e71a907 100644
--- a/test_cmd/fmt_no_args.golden.stderr
+++ b/test_cmd/fmt_no_args.golden.stderr
@@ -1,6 +1,6 @@
 ERROR: must give filename
 
-Jsonnet reformatter v0.18.0
+Jsonnet reformatter v0.19.1
 
 jsonnetfmt {<option>} { <filename> }
 
diff --git a/test_cmd/fmt_no_args.golden.stderr.golang b/test_cmd/fmt_no_args.golden.stderr.golang
index 114b82b..7b00a18 100644
--- a/test_cmd/fmt_no_args.golden.stderr.golang
+++ b/test_cmd/fmt_no_args.golden.stderr.golang
@@ -1,6 +1,6 @@
 ERROR: must give filename
 
-Jsonnet reformatter v0.18.0
+Jsonnet reformatter v0.19.1
 
 jsonnetfmt {<option>} { <filename> }
 
diff --git a/test_cmd/fmt_version1.golden.stdout b/test_cmd/fmt_version1.golden.stdout
index c9ef418..6d3c47b 100644
--- a/test_cmd/fmt_version1.golden.stdout
+++ b/test_cmd/fmt_version1.golden.stdout
@@ -1 +1 @@
-Jsonnet reformatter v0.18.0
+Jsonnet reformatter v0.19.1
diff --git a/test_cmd/fmt_version2.golden.stdout b/test_cmd/fmt_version2.golden.stdout
index c9ef418..6d3c47b 100644
--- a/test_cmd/fmt_version2.golden.stdout
+++ b/test_cmd/fmt_version2.golden.stdout
@@ -1 +1 @@
-Jsonnet reformatter v0.18.0
+Jsonnet reformatter v0.19.1
diff --git a/test_cmd/help.golden.stdout.cpp b/test_cmd/help.golden.stdout.cpp
index 28c3d03..ac1330e 100644
--- a/test_cmd/help.golden.stdout.cpp
+++ b/test_cmd/help.golden.stdout.cpp
@@ -1,4 +1,4 @@
-Jsonnet commandline interpreter v0.18.0
+Jsonnet commandline interpreter v0.19.1
 
 jsonnet {<option>} <filename>
 
diff --git a/test_cmd/help.golden.stdout.golang b/test_cmd/help.golden.stdout.golang
index 909f325..46bb1e8 100644
--- a/test_cmd/help.golden.stdout.golang
+++ b/test_cmd/help.golden.stdout.golang
@@ -1,4 +1,4 @@
-Jsonnet commandline interpreter (Go implementation) v0.18.0
+Jsonnet commandline interpreter (Go implementation) v0.19.1
 
 jsonnet {<option>} <filename>
 
diff --git a/test_cmd/jpath10.golden.stdout b/test_cmd/jpath10.golden.stdout
new file mode 100644
index 0000000..50f5f1c
--- /dev/null
+++ b/test_cmd/jpath10.golden.stdout
@@ -0,0 +1,7 @@
+[
+   108,
+   105,
+   98,
+   50,
+   10
+]
diff --git a/test_cmd/jpath9.golden.stdout b/test_cmd/jpath9.golden.stdout
new file mode 100644
index 0000000..3d2a8e8
--- /dev/null
+++ b/test_cmd/jpath9.golden.stdout
@@ -0,0 +1,7 @@
+[
+   108,
+   105,
+   98,
+   49,
+   10
+]
diff --git a/test_cmd/jsonnet_path1.golden.stdout b/test_cmd/jsonnet_path1.golden.stdout
index 9b27c4a..f2a42af 100644
--- a/test_cmd/jsonnet_path1.golden.stdout
+++ b/test_cmd/jsonnet_path1.golden.stdout
@@ -1 +1,10 @@
-"lib1\n"
+[
+   "lib1\n",
+   [
+      108,
+      105,
+      98,
+      49,
+      10
+   ]
+]
diff --git a/test_cmd/jsonnet_path2.golden.stdout b/test_cmd/jsonnet_path2.golden.stdout
index a18a460..dcaad6e 100644
--- a/test_cmd/jsonnet_path2.golden.stdout
+++ b/test_cmd/jsonnet_path2.golden.stdout
@@ -1 +1,10 @@
-"lib2\n"
+[
+   "lib2\n",
+   [
+      108,
+      105,
+      98,
+      50,
+      10
+   ]
+]
diff --git a/test_cmd/no_args.golden.stderr.cpp b/test_cmd/no_args.golden.stderr.cpp
index 86402b2..c7c1f37 100644
--- a/test_cmd/no_args.golden.stderr.cpp
+++ b/test_cmd/no_args.golden.stderr.cpp
@@ -1,6 +1,6 @@
 ERROR: must give filename
 
-Jsonnet commandline interpreter v0.18.0
+Jsonnet commandline interpreter v0.19.1
 
 jsonnet {<option>} <filename>
 
diff --git a/test_cmd/no_args.golden.stderr.golang b/test_cmd/no_args.golden.stderr.golang
index 134645d..ad73c73 100644
--- a/test_cmd/no_args.golden.stderr.golang
+++ b/test_cmd/no_args.golden.stderr.golang
@@ -1,6 +1,6 @@
 ERROR: must give filename
 
-Jsonnet commandline interpreter (Go implementation) v0.18.0
+Jsonnet commandline interpreter (Go implementation) v0.19.1
 
 jsonnet {<option>} <filename>
 
diff --git a/test_cmd/run_cmd_tests.sh b/test_cmd/run_cmd_tests.sh
index 791f581..4aa7df5 100755
--- a/test_cmd/run_cmd_tests.sh
+++ b/test_cmd/run_cmd_tests.sh
@@ -60,6 +60,8 @@ do_test "jpath5" 0 -J "lib2" -J "lib1" -e 'import "lib2_test.jsonnet"'
 do_test "jpath6" 0 -J "lib2" -J "lib1" -e 'importstr "shared.txt"'
 do_test "jpath7" 0 -J "lib1" -J "lib2" -e 'importstr "shared.txt"'
 do_test "jpath8" 1 -J "" -e 'true'
+do_test "jpath9" 0 -J "lib2" -J "lib1" -e 'importbin "shared.txt"'
+do_test "jpath10" 0 -J "lib1" -J "lib2" -e 'importbin "shared.txt"'
 do_test "ext1" 0 --ext-str x=1 -e 'std.extVar("x")'
 do_test "ext2" 0 -V x=1 -e 'std.extVar("x")'
 do_test "ext3" 1 -V y=1 -e 'std.extVar("x")'
@@ -105,9 +107,9 @@ do_test "string1" 0 -S -e '"A long\nparagraph."'
 do_test "string2" 1 -S -e 'null'
 
 export JSONNET_PATH=lib1:lib2
-do_test "jsonnet_path1" 0 -e 'importstr "shared.txt"'
+do_test "jsonnet_path1" 0 -e '[importstr "shared.txt", importbin "shared.txt"]'
 export JSONNET_PATH=lib2:lib1
-do_test "jsonnet_path2" 0 -e 'importstr "shared.txt"'
+do_test "jsonnet_path2" 0 -e '[importstr "shared.txt", importbin "shared.txt"]'
 
 if [ -z "$DISABLE_FMT_TESTS" ]; then
 
diff --git a/test_cmd/version1.golden.stdout.cpp b/test_cmd/version1.golden.stdout.cpp
index a1962a0..bd22510 100644
--- a/test_cmd/version1.golden.stdout.cpp
+++ b/test_cmd/version1.golden.stdout.cpp
@@ -1 +1 @@
-Jsonnet commandline interpreter v0.18.0
+Jsonnet commandline interpreter v0.19.1
diff --git a/test_cmd/version1.golden.stdout.golang b/test_cmd/version1.golden.stdout.golang
index fe9d047..f8dcb3b 100644
--- a/test_cmd/version1.golden.stdout.golang
+++ b/test_cmd/version1.golden.stdout.golang
@@ -1 +1 @@
-Jsonnet commandline interpreter (Go implementation) v0.18.0
+Jsonnet commandline interpreter (Go implementation) v0.19.1
diff --git a/test_cmd/version2.golden.stdout.cpp b/test_cmd/version2.golden.stdout.cpp
index a1962a0..bd22510 100644
--- a/test_cmd/version2.golden.stdout.cpp
+++ b/test_cmd/version2.golden.stdout.cpp
@@ -1 +1 @@
-Jsonnet commandline interpreter v0.18.0
+Jsonnet commandline interpreter v0.19.1
diff --git a/test_cmd/version2.golden.stdout.golang b/test_cmd/version2.golden.stdout.golang
index fe9d047..f8dcb3b 100644
--- a/test_cmd/version2.golden.stdout.golang
+++ b/test_cmd/version2.golden.stdout.golang
@@ -1 +1 @@
-Jsonnet commandline interpreter (Go implementation) v0.18.0
+Jsonnet commandline interpreter (Go implementation) v0.19.1
diff --git a/test_suite/dos_line_endings.jsonnet b/test_suite/dos_line_endings.jsonnet
index 6fe1d43..12d17ea 100644
--- a/test_suite/dos_line_endings.jsonnet
+++ b/test_suite/dos_line_endings.jsonnet
@@ -1,22 +1,22 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-{
-  text: |||
-    A paragraph
-    of text.
-  |||,
-}
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+{
+  text: |||
+    A paragraph
+    of text.
+  |||,
+}
diff --git a/test_suite/import.jsonnet b/test_suite/import.jsonnet
index 0467a13..2a07738 100644
--- a/test_suite/import.jsonnet
+++ b/test_suite/import.jsonnet
@@ -31,6 +31,9 @@ std.assertEqual(local A = 7, lib = import 'lib/A_20.libsonnet'; lib, 20) &&
 std.assertEqual(importstr 'lib/some_file.txt', 'Hello World!\n') &&
 std.assertEqual(importstr 'lib/some_file.txt', 'Hello World!\n') &&
 
+std.assertEqual(importbin 'lib/nonutf8.bin', [255, 0, 254]) &&
+std.assertEqual(importbin 'lib/nonutf8.bin', [255, 0, 254]) &&
+
 std.assertEqual(import 'lib/rel_path.libsonnet', 'rel_path') &&
 std.assertEqual(import 'lib/rel_path4.libsonnet', 'rel_path') &&
 
diff --git a/test_suite/lib/nonutf8.bin b/test_suite/lib/nonutf8.bin
new file mode 100644
index 0000000..90db00e
Binary files /dev/null and b/test_suite/lib/nonutf8.bin differ
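
The test additions above exercise the new importbin construct, which evaluates to the imported file's raw contents as an array of byte values. A small sketch using the Python binding, assuming a hypothetical scratch file written next to the working directory (not part of this test suite):

# Minimal sketch of importbin: it yields the file's bytes as an array of
# numbers. 'blob.bin' is a hypothetical scratch file created just for this.
import _jsonnet

with open('blob.bin', 'wb') as f:
    f.write(bytes([255, 0, 254]))

print(_jsonnet.evaluate_snippet('example.jsonnet', "importbin 'blob.bin'"))
# prints a pretty-printed JSON array: [ 255, 0, 254 ]
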
diff --git a/test_suite/stdlib.jsonnet b/test_suite/stdlib.jsonnet
index 669c1f9..f4b8b59 100644
--- a/test_suite/stdlib.jsonnet
+++ b/test_suite/stdlib.jsonnet
@@ -480,9 +480,20 @@ std.assertEqual(std.extVar('var2') { x+: 2 }.x, 3) &&
 
 std.assertEqual(std.split('foo/bar', '/'), ['foo', 'bar']) &&
 std.assertEqual(std.split('/foo/', '/'), ['', 'foo', '']) &&
+std.assertEqual(std.split('foo/_bar', '/_'), ['foo', 'bar']) &&
+std.assertEqual(std.split('/_foo/_', '/_'), ['', 'foo', '']) &&
 
 std.assertEqual(std.splitLimit('foo/bar', '/', 1), ['foo', 'bar']) &&
 std.assertEqual(std.splitLimit('/foo/', '/', 1), ['', 'foo/']) &&
+std.assertEqual(std.splitLimit('foo/_bar', '/_', 1), ['foo', 'bar']) &&
+std.assertEqual(std.splitLimit('/_foo/_', '/_', 1), ['', 'foo/_']) &&
+
+std.assertEqual(std.splitLimitR('foo/bar', '/', 1), ['foo', 'bar']) &&
+std.assertEqual(std.splitLimitR('/foo/', '/', 1), ['/foo', '']) &&
+std.assertEqual(std.splitLimitR('/foo/', '/', -1), ['', 'foo', '']) &&
+std.assertEqual(std.splitLimitR('foo/_bar', '/_', 1), ['foo', 'bar']) &&
+std.assertEqual(std.splitLimitR('/_foo/_', '/_', 1), ['/_foo', '']) &&
+std.assertEqual(std.splitLimitR('/_foo/_', '/_', -1), ['', 'foo', '']) &&
 
 local some_toml = {
   key: 'value',
@@ -1505,4 +1516,13 @@ std.assertEqual(std.decodeUTF8([65 + 1 - 1]), 'A') &&
 std.assertEqual(std.decodeUTF8([90, 97, 197, 188, 195, 179, 197, 130, 196, 135, 32, 103, 196, 153, 197, 155, 108, 196, 133, 32, 106, 97, 197, 186, 197, 132]), 'Zażółć gęślą jaźń') &&
 std.assertEqual(std.decodeUTF8([240, 159, 152, 131]), '😃') &&
 
+
+std.assertEqual(std.any([true, false]), true) &&
+std.assertEqual(std.any([false, false]), false) &&
+std.assertEqual(std.any([]), false) &&
+
+std.assertEqual(std.all([true, false]), false) &&
+std.assertEqual(std.all([true, true]), true) &&
+std.assertEqual(std.all([]), true) &&
+
 true
diff --git a/tests.sh b/tests.sh
index 0876eaa..c53ca0f 100755
--- a/tests.sh
+++ b/tests.sh
@@ -3,13 +3,13 @@ set -e
 
 JSONNET_BIN="${JSONNET_BIN:-./jsonnet}"
 TEST_SNIPPET="std.assertEqual(({ x: 1, y: self.x } { x: 2 }).y, 2)"
-echo -n "snippet: "
+printf "snippet: "
 "$JSONNET_BIN" -e "${TEST_SNIPPET}" || FAIL=TRUE
 
 if [ -z "$DISABLE_LIB_TESTS" ]; then
-    echo -n 'libjsonnet_test_snippet: '
+    printf 'libjsonnet_test_snippet: '
     LD_LIBRARY_PATH=. ./libjsonnet_test_snippet "${TEST_SNIPPET}" || FAIL=TRUE
-    echo -n 'libjsonnet_test_file: '
+    printf 'libjsonnet_test_file: '
     LD_LIBRARY_PATH=. ./libjsonnet_test_file "test_suite/object.jsonnet" || FAIL=TRUE
 fi
 examples/check.sh || FAIL=TRUE
diff --git a/tools/scripts/push_docs.sh b/tools/scripts/push_docs.sh
index 8fc9eb4..56d813c 100755
--- a/tools/scripts/push_docs.sh
+++ b/tools/scripts/push_docs.sh
@@ -45,7 +45,7 @@ if [ ! -r 'doc/_config.yml' ]; then
 fi
 
 if [ ! -r 'doc/js/libjsonnet.wasm' ]; then
-  echo 'Cannot push as docs/js/libjsonnet.wasm has not been built.' >&1
+  echo 'Cannot push as doc/js/libjsonnet.wasm has not been built.' >&1
   exit 1
 fi
 
